corpus_id
stringlengths
7
12
paper_id
stringlengths
9
16
title
stringlengths
1
261
abstract
stringlengths
70
4.02k
source
stringclasses
1 value
bibtex
stringlengths
208
20.9k
citation_key
stringlengths
6
100
arxiv-663801
2410.00121
Using fractal dimension to predict the risk of intra cranial aneurysm rupture with machine learning
<|reference_start|>Using fractal dimension to predict the risk of intra cranial aneurysm rupture with machine learning: Intracranial aneurysms (IAs) that rupture result in significant morbidity and mortality. While traditional risk models such as the PHASES score are useful in clinical decision making, machine learning (ML) models offer the potential to provide more accuracy. In this study, we compared the performance of four different machine learning algorithms Random Forest (RF), XGBoost (XGB), Support Vector Machine (SVM), and Multi Layer Perceptron (MLP) on clinical and radiographic features to predict rupture status of intracranial aneurysms. Among the models, RF achieved the highest accuracy (85%) with balanced precision and recall, while MLP had the lowest overall performance (accuracy of 63%). Fractal dimension ranked as the most important feature for model performance across all models.<|reference_end|>
arxiv
@article{elavarthi2024using, title={Using fractal dimension to predict the risk of intra cranial aneurysm rupture with machine learning}, author={Pradyumna Elavarthi and Anca Ralescu and Mark D. Johnson and Charles J. Prestigiacomo}, journal={arXiv preprint arXiv:2410.00121}, year={2024}, archivePrefix={arXiv}, eprint={2410.00121}, primaryClass={cs.LG} }
elavarthi2024using
arxiv-663802
2410.00122
Additively Manufactured Open-Source Quadruped Robots for Multi-Robot SLAM Applications
<|reference_start|>Additively Manufactured Open-Source Quadruped Robots for Multi-Robot SLAM Applications: This work presents the design and development of the quadruped robot Squeaky to be used as a research and learning platform for single and multi-SLAM robotics, computer vision, and reinforcement learning. Affordable robots are becoming necessary when expanding from single to multi-robot applications, as the cost can increase exponentially as fleet size increases. SLAM is essential for a robot to perceive and localize within its environment to perform applications such as cave exploration, disaster assistance, and remote inspection. For improved efficiency, a fleet of robots can be employed to combine maps for multi-robot SLAM. Squeaky is an affordable quadrupedal robot, designed to have easily adaptable hardware and software, capable of creating a merged map under a shared network from multiple robots, and available open-source for the benefit of the research community.<|reference_end|>
arxiv
@article{fuge2024additively, title={Additively Manufactured Open-Source Quadruped Robots for Multi-Robot SLAM Applications}, author={Zachary Fuge and Benjamin Beiter and Alexander Leonessa}, journal={arXiv preprint arXiv:2410.00122}, year={2024}, archivePrefix={arXiv}, eprint={2410.00122}, primaryClass={cs.RO} }
fuge2024additively
arxiv-663803
2410.00126
Resonance Reduction Against Adversarial Attacks in Dynamic Networks via Eigenspectrum Optimization
<|reference_start|>Resonance Reduction Against Adversarial Attacks in Dynamic Networks via Eigenspectrum Optimization: Resonance is a well-known phenomenon that happens in systems with second order dynamics. In this paper we address the fundamental question of making a network robust to signal being periodically pumped into it at or near a resonant frequency by an adversarial agent with the aim of saturating the network with the signal. Towards this goal, we develop the notion of network vulnerability, which is measured by the expected resonance amplitude on the network under a stochastically modeled adversarial attack. Assuming a second order dynamics model based on the network graph Laplacian matrix and a known stochastic model for the adversarial attack, we propose two methods for minimizing the network vulnerability that leverage the principle of eigenspectrum optimization. We provide extensive numerical results analyzing the effects of both methods.<|reference_end|>
arxiv
@article{sahin2024resonance, title={Resonance Reduction Against Adversarial Attacks in Dynamic Networks via Eigenspectrum Optimization}, author={Alp Sahin and Nicolas Kozachuk and Rick S. Blum and Subhrajit Bhattacharya}, journal={arXiv preprint arXiv:2410.00126}, year={2024}, archivePrefix={arXiv}, eprint={2410.00126}, primaryClass={cs.SI math.OC} }
sahin2024resonance
arxiv-663804
2410.00129
Cartesian Genetic Programming Approach for Designing Convolutional Neural Networks
<|reference_start|>Cartesian Genetic Programming Approach for Designing Convolutional Neural Networks: The present study covers an approach to neural architecture search (NAS) using Cartesian genetic programming (CGP) for the design and optimization of Convolutional Neural Networks (CNNs). In designing artificial neural networks, one crucial aspect of the innovative approach is suggesting a novel neural architecture. Currently used architectures have mostly been developed manually by human experts, which is a time-consuming and error-prone process. In this work, we use pure Genetic Programming Approach to design CNNs, which employs only one genetic operation, i.e., mutation. In the course of preliminary experiments, our methodology yields promising results.<|reference_end|>
arxiv
@article{krzywda2024cartesian, title={Cartesian Genetic Programming Approach for Designing Convolutional Neural Networks}, author={Maciej Krzywda and Szymon {\L}ukasik and Amir H. Gandomi}, journal={Progress in Polish Artificial Intelligence Research, pp. 512-519, 2024}, year={2024}, doi={10.17388/WUT.2024.0002.MiNI}, archivePrefix={arXiv}, eprint={2410.00129}, primaryClass={cs.NE cs.AI cs.LG} }
krzywda2024cartesian
arxiv-663805
2410.00131
Fisher Information-based Efficient Curriculum Federated Learning with Large Language Models
<|reference_start|>Fisher Information-based Efficient Curriculum Federated Learning with Large Language Models: As a promising paradigm to collaboratively train models with decentralized data, Federated Learning (FL) can be exploited to fine-tune Large Language Models (LLMs). While LLMs correspond to huge size, the scale of the training data significantly increases, which leads to tremendous amounts of computation and communication costs. The training data is generally non-Independent and Identically Distributed (non-IID), which requires adaptive data processing within each device. Although Low Rank Adaptation (LoRA) can significantly reduce the scale of parameters to update in the fine-tuning process, it still takes unaffordable time to transfer the low-rank parameters of all the layers in LLMs. In this paper, we propose a Fisher Information-based Efficient Curriculum Federated Learning framework (FibecFed) with two novel methods, i.e., adaptive federated curriculum learning and efficient sparse parameter update. First, we propose a fisher information-based method to adaptively sample data within each device to improve the effectiveness of the FL fine-tuning process. Second, we dynamically select the proper layers for global aggregation and sparse parameters for local update with LoRA so as to improve the efficiency of the FL fine-tuning process. Extensive experimental results based on 10 datasets demonstrate that FibecFed yields excellent performance (up to 45.35% in terms of accuracy) and superb fine-tuning speed (up to 98.61% faster) compared with 17 baseline approaches).<|reference_end|>
arxiv
@article{liu2024fisher, title={Fisher Information-based Efficient Curriculum Federated Learning with Large Language Models}, author={Ji Liu and Jiaxiang Ren and Ruoming Jin and Zijie Zhang and Yang Zhou and Patrick Valduriez and Dejing Dou}, journal={arXiv preprint arXiv:2410.00131}, year={2024}, archivePrefix={arXiv}, eprint={2410.00131}, primaryClass={cs.LG cs.AI cs.CL cs.DC} }
liu2024fisher
arxiv-663806
2410.00132
CVVLSNet: Vehicle Location and Speed Estimation Using Partial Connected Vehicle Trajectory Data
<|reference_start|>CVVLSNet: Vehicle Location and Speed Estimation Using Partial Connected Vehicle Trajectory Data: Real-time estimation of vehicle locations and speeds is crucial for developing many beneficial transportation applications in traffic management and control, e.g., adaptive signal control. Recent advances in communication technologies facilitate the emergence of connected vehicles (CVs), which can share traffic information with nearby CVs or infrastructures. At the early stage of connectivity, only a portion of vehicles are CVs. The locations and speeds for those non-CVs (NCs) are not accessible and must be estimated to obtain the full traffic information. To address the above problem, this paper proposes a novel CV-based Vehicle Location and Speed estimation network, CVVLSNet, to simultaneously estimate the vehicle locations and speeds exclusively using partial CV trajectory data. A road cell occupancy (RCO) method is first proposed to represent the variable vehicle state information. Spatiotemporal interactions can be integrated by simply fusing the RCO representations. Then, CVVLSNet, taking the Coding-RAte TransformEr (CRATE) network as a backbone, is introduced to estimate the vehicle locations and speeds. Moreover, physical vehicle size constraints are also considered in loss functions. Extensive experiments indicate that the proposed method significantly outperformed the existing method under various CV penetration rates, signal timings, and volume-to-capacity ratios.<|reference_end|>
arxiv
@article{ye2024cvvlsnet:, title={CVVLSNet: Vehicle Location and Speed Estimation Using Partial Connected Vehicle Trajectory Data}, author={Jiachen Ye and Dingyu Wang and Shaocheng Jia and Xin Pei and Zi Yang and Yi Zhang and S.C. Wong}, journal={arXiv preprint arXiv:2410.00132}, year={2024}, archivePrefix={arXiv}, eprint={2410.00132}, primaryClass={cs.CV} }
ye2024cvvlsnet:
arxiv-663807
2410.00134
Semantic-Driven Topic Modeling Using Transformer-Based Embeddings and Clustering Algorithms
<|reference_start|>Semantic-Driven Topic Modeling Using Transformer-Based Embeddings and Clustering Algorithms: Topic modeling is a powerful technique to discover hidden topics and patterns within a collection of documents without prior knowledge. Traditional topic modeling and clustering-based techniques encounter challenges in capturing contextual semantic information. This study introduces an innovative end-to-end semantic-driven topic modeling technique for the topic extraction process, utilizing advanced word and document embeddings combined with a powerful clustering algorithm. This semantic-driven approach represents a significant advancement in topic modeling methodologies. It leverages contextual semantic information to extract coherent and meaningful topics. Specifically, our model generates document embeddings using pre-trained transformer-based language models, reduces the dimensions of the embeddings, clusters the embeddings based on semantic similarity, and generates coherent topics for each cluster. Compared to ChatGPT and traditional topic modeling algorithms, our model provides more coherent and meaningful topics.<|reference_end|>
arxiv
@article{mersha2024semantic-driven, title={Semantic-Driven Topic Modeling Using Transformer-Based Embeddings and Clustering Algorithms}, author={Melkamu Abay Mersha and Mesay Gemeda Yigezu and Jugal Kalita}, journal={ACLing2024 6th International Conference on AI in Computational Linguistics}, year={2024}, archivePrefix={arXiv}, eprint={2410.00134}, primaryClass={cs.CL cs.AI} }
mersha2024semantic-driven
arxiv-663808
2410.00145
Constraint-Aware Refinement for Safety Verification of Neural Feedback Loops
<|reference_start|>Constraint-Aware Refinement for Safety Verification of Neural Feedback Loops: Neural networks (NNs) are becoming increasingly popular in the design of control pipelines for autonomous systems. However, since the performance of NNs can degrade in the presence of out-of-distribution data or adversarial attacks, systems that have NNs in their control pipelines, i.e., neural feedback loops (NFLs), need safety assurances before they can be applied in safety-critical situations. Reachability analysis offers a solution to this problem by calculating reachable sets that bound the possible future states of an NFL and can be checked against dangerous regions of the state space to verify that the system does not violate safety constraints. Since exact reachable sets are generally intractable to calculate, reachable set over approximations (RSOAs) are typically used. The problem with RSOAs is that they can be overly conservative, making it difficult to verify the satisfaction of safety constraints, especially over long time horizons or for highly nonlinear NN control policies. Refinement strategies such as partitioning or symbolic propagation are typically used to limit the conservativeness of RSOAs, but these approaches come with a high computational cost and often can only be used to verify safety for simple reachability problems. This paper presents Constraint-Aware Refinement for Verification (CARV): an efficient refinement strategy that reduces the conservativeness of RSOAs by explicitly using the safety constraints on the NFL to refine RSOAs only where necessary. We demonstrate that CARV can verify the safety of an NFL where other approaches either fail or take up to 60x longer and 40x the memory.<|reference_end|>
arxiv
@article{rober2024constraint-aware, title={Constraint-Aware Refinement for Safety Verification of Neural Feedback Loops}, author={Nicholas Rober and Jonathan P. How}, journal={arXiv preprint arXiv:2410.00145}, year={2024}, archivePrefix={arXiv}, eprint={2410.00145}, primaryClass={eess.SY cs.LG cs.RO cs.SY} }
rober2024constraint-aware
arxiv-663809
2410.00147
Modeling Turbulence in the Atmospheric Boundary Layer with Spectral Element and Finite Volume Methods
<|reference_start|>Modeling Turbulence in the Atmospheric Boundary Layer with Spectral Element and Finite Volume Methods: We present large-eddy-simulation (LES) modeling approaches for the simulation of atmospheric boundary layer turbulence that are of direct relevance to wind energy production. In this paper, we study a GABLS benchmark problem using high-order spectral element code Nek5000/RS and a block-structured second-order finite-volume code AMR-Wind which are supported under the DOE's Exascale Computing Project (ECP) Center for Efficient Exascale Discretizations (CEED) and ExaWind projects, respectively, targeting application simulations on various acceleration-device based exascale computing platforms. As for Nek5000/RS we demonstrate our newly developed subgrid-scale (SGS) models based on mean-field eddy viscosity (MFEV), high-pass filter (HPF), and Smagorinsky (SMG) with traction boundary conditions. For the traction boundary conditions, a novel analytical approach is presented that solves for the surface friction velocity and surface kinematic temperature flux. For AMR-Wind, standard SMG is used and discussed in detail the traction boundary conditions for convergence. We provide low-order statistics, convergence and turbulent structure analysis. Verification and convergence studies were performed for both codes at various resolutions and it was found that Nek5000/RS demonstrate convergence with resolution for all ABL bulk parameters, including boundary layer and low level jet (LLJ) height. Extensive comparisons are presented with simulation data from the literature.<|reference_end|>
arxiv
@article{churchfield2024modeling, title={Modeling Turbulence in the Atmospheric Boundary Layer with Spectral Element and Finite Volume Methods}, author={Ananias Tomboulides and Matthew Churchfield and Paul Fischer and Michael Sprague and Misun Min}, journal={arXiv preprint arXiv:2410.00147}, year={2024}, archivePrefix={arXiv}, eprint={2410.00147}, primaryClass={cs.CE} }
churchfield2024modeling
arxiv-663810
2410.00149
Are Large Language Models In-Context Personalized Summarizers? Get an iCOPERNICUS Test Done!
<|reference_start|>Are Large Language Models In-Context Personalized Summarizers? Get an iCOPERNICUS Test Done!: Large Language Models (LLMs) have succeeded considerably in In-Context-Learning (ICL) based summarization. However, saliency is subject to the users' specific preference histories. Hence, we need reliable In-Context Personalization Learning (ICPL) capabilities within such LLMs. For any arbitrary LLM to exhibit ICPL, it needs to have the ability to discern contrast in user profiles. A recent study proposed a measure for degree-of-personalization called EGISES for the first time. EGISES measures a model's responsiveness to user profile differences. However, it cannot test if a model utilizes all three types of cues provided in ICPL prompts: (i) example summaries, (ii) user's reading histories, and (iii) contrast in user profiles. To address this, we propose the iCOPERNICUS framework, a novel In-COntext PERsonalization learNIng sCrUtiny of Summarization capability in LLMs that uses EGISES as a comparative measure. As a case-study, we evaluate 17 state-of-the-art LLMs based on their reported ICL performances and observe that 15 models' ICPL degrades (min: 1.6%; max: 3.6%) when probed with richer prompts, thereby showing lack of true ICPL.<|reference_end|>
arxiv
@article{patel2024are, title={Are Large Language Models In-Context Personalized Summarizers? Get an iCOPERNICUS Test Done!}, author={Divya Patel and Pathik Patel and Ankush Chander and Sourish Dasgupta and Tanmoy Chakraborty}, journal={arXiv preprint arXiv:2410.00149}, year={2024}, archivePrefix={arXiv}, eprint={2410.00149}, primaryClass={cs.CL cs.LG cs.NE} }
patel2024are
arxiv-663811
2410.00150
What If We Had Used a Different App? Reliable Counterfactual KPI Analysis in Wireless Systems
<|reference_start|>What If We Had Used a Different App? Reliable Counterfactual KPI Analysis in Wireless Systems: In modern wireless network architectures, such as Open Radio Access Network (O-RAN), the operation of the radio access network (RAN) is managed by applications, or apps for short, deployed at intelligent controllers. These apps are selected from a given catalog based on current contextual information. For instance, a scheduling app may be selected on the basis of current traffic and network conditions. Once an app is chosen and run, it is no longer possible to directly test the performance that would have been obtained with another app. This test, however, would be potentially valuable to monitor and optimize the network operation. With this goal in mind, this paper addresses the "what-if" problem of estimating the values of key performance indicators (KPIs) that would have been obtained if a different app had been implemented by the RAN. To this end, we propose a conformal-prediction-based counterfactual analysis method for wireless systems that provides reliable "error bars" for the estimated KPIs, containing the true KPIs with a user-defined probability, despite the inherent covariate shift between logged and test data. Experimental results for medium access control-layer apps and for physical-layer apps demonstrate the merits of the proposed method.<|reference_end|>
arxiv
@article{hou2024what, title={What If We Had Used a Different App? Reliable Counterfactual KPI Analysis in Wireless Systems}, author={Qiushuo Hou and Sangwoo Park and Matteo Zecchin and Yunlong Cai and Guanding Yu and Osvaldo Simeone}, journal={arXiv preprint arXiv:2410.00150}, year={2024}, archivePrefix={arXiv}, eprint={2410.00150}, primaryClass={cs.IT cs.LG cs.NI eess.SP math.IT} }
hou2024what
arxiv-663812
2410.00151
Scheherazade: Evaluating Chain-of-Thought Math Reasoning in LLMs with Chain-of-Problems
<|reference_start|>Scheherazade: Evaluating Chain-of-Thought Math Reasoning in LLMs with Chain-of-Problems: Benchmarks are critical for measuring progress of math reasoning abilities of Large Language Models (LLMs). However, existing widely-used benchmarks such as GSM8K have been rendered less useful as multiple cutting-edge LLMs achieve over 94% accuracy. While harder benchmarks have been proposed, their creation is often manual and expensive. We present Scheherazade, an automated approach for producing challenging mathematical reasoning benchmarks by logically chaining mathematical reasoning problems. We propose two different chaining methods, forward chaining and backward chaining, which require reasoning forward and backward through the chain respectively. We apply Scheherazade on GSM8K to create GSM8K-Scheherazade and evaluate 3 frontier LLMs and OpenAI's o1-preview on it. We show that while frontier models' performance declines precipitously at only a few questions chained, a preliminary evaluation suggests o1-preview performance persists up to 5 questions chained backwards. In addition, while all other models perform worse when problems are chained backwards, o1-preview performs better on backward-chained benchmarks. We will release the dataset and code publicly.<|reference_end|>
arxiv
@article{miner2024scheherazade:, title={Scheherazade: Evaluating Chain-of-Thought Math Reasoning in LLMs with Chain-of-Problems}, author={Stephen Miner and Yoshiki Takashima and Simeng Han and Ferhat Erata and Timos Antonopoulos and Ruzica Piskac and Scott J Shapiro}, journal={arXiv preprint arXiv:2410.00151}, year={2024}, archivePrefix={arXiv}, eprint={2410.00151}, primaryClass={cs.CL} }
miner2024scheherazade:
arxiv-663813
2410.00152
Multimodal Alignment of Histopathological Images Using Cell Segmentation and Point Set Matching for Integrative Cancer Analysis
<|reference_start|>Multimodal Alignment of Histopathological Images Using Cell Segmentation and Point Set Matching for Integrative Cancer Analysis: Histopathological imaging is vital for cancer research and clinical practice, with multiplexed Immunofluorescence (MxIF) and Hematoxylin and Eosin (H&E) providing complementary insights. However, aligning different stains at the cell level remains a challenge due to modality differences. In this paper, we present a novel framework for multimodal image alignment using cell segmentation outcomes. By treating cells as point sets, we apply Coherent Point Drift (CPD) for initial alignment and refine it with Graph Matching (GM). Evaluated on ovarian cancer tissue microarrays (TMAs), our method achieves high alignment accuracy, enabling integration of cell-level features across modalities and generating virtual H&E images from MxIF data for enhanced clinical interpretation.<|reference_end|>
arxiv
@article{jiang2024multimodal, title={Multimodal Alignment of Histopathological Images Using Cell Segmentation and Point Set Matching for Integrative Cancer Analysis}, author={Jun Jiang and Raymond Moore and Brenna Novotny and Leo Liu and Zachary Fogarty and Ray Guo and Markovic Svetomir and Chen Wang}, journal={arXiv preprint arXiv:2410.00152}, year={2024}, archivePrefix={arXiv}, eprint={2410.00152}, primaryClass={eess.IV cs.CV cs.LG q-bio.QM} }
jiang2024multimodal
arxiv-663814
2410.00153
Beyond Single Concept Vector: Modeling Concept Subspace in LLMs with Gaussian Distribution
<|reference_start|>Beyond Single Concept Vector: Modeling Concept Subspace in LLMs with Gaussian Distribution: Probing learned concepts in large language models (LLMs) is crucial for understanding how semantic knowledge is encoded internally. Training linear classifiers on probing tasks is a principle approach to denote the vector of a certain concept in the representation space. However, the single vector identified for a concept varies with both data and training, making it less robust and weakening its effectiveness in real-world applications. To address this challenge, we propose an approach to approximate the subspace representing a specific concept. Built on linear probing classifiers, we extend the concept vectors into Gaussian Concept Subspace (GCS). We demonstrate GCS's effectiveness through measuring its faithfulness and plausibility across multiple LLMs with different sizes and architectures. Additionally, we use representation intervention tasks to showcase its efficacy in real-world applications such as emotion steering. Experimental results indicate that GCS concept vectors have the potential to balance steering performance and maintaining the fluency in natural language generation tasks.<|reference_end|>
arxiv
@article{zhao2024beyond, title={Beyond Single Concept Vector: Modeling Concept Subspace in LLMs with Gaussian Distribution}, author={Haiyan Zhao and Heng Zhao and Bo Shen and Ali Payani and Fan Yang and Mengnan Du}, journal={arXiv preprint arXiv:2410.00153}, year={2024}, archivePrefix={arXiv}, eprint={2410.00153}, primaryClass={cs.CL cs.AI cs.LG} }
zhao2024beyond
arxiv-663815
2410.00157
Constraining Gaussian Process Implicit Surfaces for Robot Manipulation via Dataset Refinement
<|reference_start|>Constraining Gaussian Process Implicit Surfaces for Robot Manipulation via Dataset Refinement: Model-based control faces fundamental challenges in partially-observable environments due to unmodeled obstacles. We propose an online learning and optimization method to identify and avoid unobserved obstacles online. Our method, Constraint Obeying Gaussian Implicit Surfaces (COGIS), infers contact data using a combination of visual input and state tracking, informed by predictions from a nominal dynamics model. We then fit a Gaussian process implicit surface (GPIS) to these data and refine the dataset through a novel method of enforcing constraints on the estimated surface. This allows us to design a Model Predictive Control (MPC) method that leverages the obstacle estimate to complete multiple manipulation tasks. By modeling the environment instead of attempting to directly adapt the dynamics, our method succeeds at both low-dimensional peg-in-hole tasks and high-dimensional deformable object manipulation tasks. Our method succeeds in 10/10 trials vs 1/10 for a baseline on a real-world cable manipulation task under partial observability of the environment.<|reference_end|>
arxiv
@article{kumar2024constraining, title={Constraining Gaussian Process Implicit Surfaces for Robot Manipulation via Dataset Refinement}, author={Abhinav Kumar and Peter Mitrano and Dmitry Berenson}, journal={arXiv preprint arXiv:2410.00157}, year={2024}, archivePrefix={arXiv}, eprint={2410.00157}, primaryClass={cs.RO} }
kumar2024constraining
arxiv-663816
2410.00161
KV-Compress: Paged KV-Cache Compression with Variable Compression Rates per Attention Head
<|reference_start|>KV-Compress: Paged KV-Cache Compression with Variable Compression Rates per Attention Head: Context lengths of Large Language Models (LLMs) have exploded in recent years, with 128k-token context becoming a standard and million-token context becoming a reality. Efficiently supporting long-context inference remains challenging as the memory that must be allocated in key-value (KV) cache for a generation scales with its context length, limiting the number of long-context requests that can be served concurrently under a given memory budget. KV cache compression can mitigate this issue by removing under-utilized KVs from each attention head's cache and reducing its memory footprint. Higher theoretical compression rates can be achieved when the number of removed KVs varies across attention heads, but application of such a strategy within existing inference frameworks adds fragmentation and cannot realize the theoretical compression rates in physical memory. We introduce KV-Compress, a novel compression method that evicts contiguous KV blocks within a PagedAttention framework, reducing the memory footprint of the KV cache proportionally to this theoretical compression rate. Our method achieves state-of-the-art performance on LongBench for both Mistral-7B-Instruct-v0.2 and Llama-3.1-8B-Instruct while lowering the total number of compressed KVs by 4x compared with prior methods. Evaluations on Llama-3.1-8B-Instruct and Llama-3.1-70B-Instruct-FP8 achieve compression rates up to 8x with negligible impact on performance, and up to 64x while retaining over 90% of full-cache performance for all but three of the suite's subsets. We benchmark an integration of our method with vLLM that increases total throughput by up to 5.18x by enabling larger decoding batches.<|reference_end|>
arxiv
@article{rehg2024kv-compress:, title={KV-Compress: Paged KV-Cache Compression with Variable Compression Rates per Attention Head}, author={Isaac Rehg}, journal={arXiv preprint arXiv:2410.00161}, year={2024}, archivePrefix={arXiv}, eprint={2410.00161}, primaryClass={cs.CL} }
rehg2024kv-compress:
arxiv-663817
2410.00163
Adapting LLMs for the Medical Domain in Portuguese: A Study on Fine-Tuning and Model Evaluation
<|reference_start|>Adapting LLMs for the Medical Domain in Portuguese: A Study on Fine-Tuning and Model Evaluation: This study evaluates the performance of large language models (LLMs) as medical agents in Portuguese, aiming to develop a reliable and relevant virtual assistant for healthcare professionals. The HealthCareMagic-100k-en and MedQuAD datasets, translated from English using GPT-3.5, were used to fine-tune the ChatBode-7B model using the PEFT-QLoRA method. The InternLM2 model, with initial training on medical data, presented the best overall performance, with high precision and adequacy in metrics such as accuracy, completeness and safety. However, DrBode models, derived from ChatBode, exhibited a phenomenon of catastrophic forgetting of acquired medical knowledge. Despite this, these models performed frequently or even better in aspects such as grammaticality and coherence. A significant challenge was low inter-rater agreement, highlighting the need for more robust assessment protocols. This work paves the way for future research, such as evaluating multilingual models specific to the medical field, improving the quality of training data, and developing more consistent evaluation methodologies for the medical field.<|reference_end|>
arxiv
@article{paiola2024adapting, title={Adapting LLMs for the Medical Domain in Portuguese: A Study on Fine-Tuning and Model Evaluation}, author={Pedro Henrique Paiola and Gabriel Lino Garcia and Jo{\~a}o Renato Ribeiro Manesco and Mateus Roder and Douglas Rodrigues and Jo{\~a}o Paulo Papa}, journal={arXiv preprint arXiv:2410.00163}, year={2024}, archivePrefix={arXiv}, eprint={2410.00163}, primaryClass={cs.CL cs.AI} }
paiola2024adapting
arxiv-663818
2410.00166
EEG Emotion Copilot: Pruning LLMs for Emotional EEG Interpretation with Assisted Medical Record Generation
<|reference_start|>EEG Emotion Copilot: Pruning LLMs for Emotional EEG Interpretation with Assisted Medical Record Generation: In the fields of affective computing (AC) and brain-machine interface (BMI), the analysis of physiological and behavioral signals to discern individual emotional states has emerged as a critical research frontier. While deep learning-based approaches have made notable strides in EEG emotion recognition, particularly in feature extraction and pattern recognition, significant challenges persist in achieving end-to-end emotion computation, including real-time processing, individual adaptation, and seamless user interaction. This paper presents the EEG Emotion Copilot, a system leveraging a lightweight large language model (LLM) operating in a local setting. The system is designed to first recognize emotional states directly from EEG signals, subsequently generate personalized diagnostic and treatment suggestions, and finally support the automation of electronic medical records. The proposed solution emphasizes both the accuracy of emotion recognition and an enhanced user experience, facilitated by an intuitive interface for participant interaction. We further discuss the construction of the data framework, model pruning, training, and deployment strategies aimed at improving real-time performance and computational efficiency. Privacy concerns are also addressed, with a focus on ethical data collection, processing, and the protection of users' personal information. Through these efforts, we aim to advance the application of AC in the medical domain, offering innovative approaches to mental health diagnostics and treatment.<|reference_end|>
arxiv
@article{chen2024eeg, title={EEG Emotion Copilot: Pruning LLMs for Emotional EEG Interpretation with Assisted Medical Record Generation}, author={Hongyu Chen, Weiming Zeng, Chengcheng Chen, Luhui Cai, Fei Wang, Lei Wang, Wei Zhang, Yueyang Li, Hongjie Yan, Wai Ting Siok and Nizhuan Wang}, journal={arXiv preprint arXiv:2410.00166}, year={2024}, archivePrefix={arXiv}, eprint={2410.00166}, primaryClass={cs.CV} }
chen2024eeg
arxiv-663819
2410.00168
SSR: Alignment-Aware Modality Connector for Speech Language Models
<|reference_start|>SSR: Alignment-Aware Modality Connector for Speech Language Models: Fusing speech into pre-trained language model (SpeechLM) usually suffers from inefficient encoding of long-form speech and catastrophic forgetting of pre-trained text modality. We propose SSR-Connector (Segmented Speech Representation Connector) for better modality fusion. Leveraging speech-text alignments, our approach segments and compresses speech features to match the granularity of text embeddings. Additionally, we introduce a two-stage training pipeline that includes the distillation and fine-tuning phases to mitigate catastrophic forgetting. SSR-Connector outperforms existing mechanism for speech-text modality fusion, consistently achieving better speech understanding (e.g., +10 accuracy on StoryCloze and +20 on Speech-MMLU) while preserving pre-trained text ability.<|reference_end|>
arxiv
@article{tan2024ssr:, title={SSR: Alignment-Aware Modality Connector for Speech Language Models}, author={Weiting Tan, Hirofumi Inaguma, Ning Dong, Paden Tomasello, Xutai Ma}, journal={arXiv preprint arXiv:2410.00168}, year={2024}, archivePrefix={arXiv}, eprint={2410.00168}, primaryClass={cs.CL cs.SD eess.AS} }
tan2024ssr:
arxiv-663820
2410.00169
(Almost) Smooth Sailing: Towards Numerical Stability of Neural Networks Through Differentiable Regularization of the Condition Number
<|reference_start|>(Almost) Smooth Sailing: Towards Numerical Stability of Neural Networks Through Differentiable Regularization of the Condition Number: Maintaining numerical stability in machine learning models is crucial for their reliability and performance. One approach to maintain stability of a network layer is to integrate the condition number of the weight matrix as a regularizing term into the optimization algorithm. However, due to its discontinuous nature and lack of differentiability the condition number is not suitable for a gradient descent approach. This paper introduces a novel regularizer that is provably differentiable almost everywhere and promotes matrices with low condition numbers. In particular, we derive a formula for the gradient of this regularizer which can be easily implemented and integrated into existing optimization algorithms. We show the advantages of this approach for noisy classification and denoising of MNIST images.<|reference_end|>
arxiv
@article{nenov2024(almost), title={(Almost) Smooth Sailing: Towards Numerical Stability of Neural Networks Through Differentiable Regularization of the Condition Number}, author={Rossen Nenov, Daniel Haider, Peter Balazs}, journal={arXiv preprint arXiv:2410.00169}, year={2024}, archivePrefix={arXiv}, eprint={2410.00169}, primaryClass={cs.LG math.OC stat.ML} }
nenov2024(almost)
arxiv-663821
2410.00171
Basis-to-Basis Operator Learning Using Function Encoders
<|reference_start|>Basis-to-Basis Operator Learning Using Function Encoders: We present Basis-to-Basis (B2B) operator learning, a novel approach for learning operators on Hilbert spaces of functions based on the foundational ideas of function encoders. We decompose the task of learning operators into two parts: learning sets of basis functions for both the input and output spaces, and learning a potentially nonlinear mapping between the coefficients of the basis functions. B2B operator learning circumvents many challenges of prior works, such as requiring data to be at fixed locations, by leveraging classic techniques such as least-squares to compute the coefficients. It is especially potent for linear operators, where we compute a mapping between bases as a single matrix transformation with a closed form solution. Furthermore, with minimal modifications and using the deep theoretical connections between function encoders and functional analysis, we derive operator learning algorithms that are directly analogous to eigen-decomposition and singular value decomposition. We empirically validate B2B operator learning on six benchmark operator learning tasks, and show that it demonstrates a two-orders-of-magnitude improvement in accuracy over existing approaches on several benchmark tasks.<|reference_end|>
arxiv
@article{ingebrand2024basis-to-basis, title={Basis-to-Basis Operator Learning Using Function Encoders}, author={Tyler Ingebrand, Adam J. Thorpe, Somdatta Goswami, Krishna Kumar, Ufuk Topcu}, journal={arXiv preprint arXiv:2410.00171}, year={2024}, archivePrefix={arXiv}, eprint={2410.00171}, primaryClass={cs.LG} }
ingebrand2024basis-to-basis
arxiv-663822
2410.00173
GaNDLF-Synth: A Framework to Democratize Generative AI for (Bio)Medical Imaging
<|reference_start|>GaNDLF-Synth: A Framework to Democratize Generative AI for (Bio)Medical Imaging: Generative Artificial Intelligence (GenAI) is a field of AI that creates new data samples from existing ones. It utilizing deep learning to overcome the scarcity and regulatory constraints of healthcare data by generating new data points that integrate seamlessly with original datasets. This paper explores the background and motivation for GenAI, and introduces the Generally Nuanced Deep Learning Framework for Synthesis (GaNDLF-Synth) to address a significant gap in the literature and move towards democratizing the implementation and assessment of image synthesis tasks in healthcare. GaNDLF-Synth describes a unified abstraction for various synthesis algorithms, including autoencoders, generative adversarial networks, and diffusion models. Leveraging the GANDLF-core framework, it supports diverse data modalities and distributed computing, ensuring scalability and reproducibility through extensive unit testing. The aim of GaNDLF-Synth is to lower the entry barrier for GenAI, and make it more accessible and extensible by the wider scientific community.<|reference_end|>
arxiv
@article{pati2024gandlf-synth:, title={GaNDLF-Synth: A Framework to Democratize Generative AI for (Bio)Medical Imaging}, author={Sarthak Pati, Szymon Mazurek, Spyridon Bakas}, journal={arXiv preprint arXiv:2410.00173}, year={2024}, archivePrefix={arXiv}, eprint={2410.00173}, primaryClass={cs.LG} }
pati2024gandlf-synth:
arxiv-663823
2410.00174
Exploring Interdisciplinary Team Collaboration in Clinical NLP Projects Through the Lens of Activity Theory
<|reference_start|>Exploring Interdisciplinary Team Collaboration in Clinical NLP Projects Through the Lens of Activity Theory: Natural Language Processing (NLP) techniques have been increasingly integrated into clinical projects to advance clinical decision-making and improve patient outcomes. Such projects benefit from interdisciplinary team collaborations. This paper explores challenges and opportunities using two clinical NLP projects as case studies, where speech-language pathologists (SLPs) and NLP researchers jointly developed technology-based systems to improve clinical workflow. Through semi-structured interviews with five SLPs and four NLP researchers, we collected collaboration practices and challenges. Using Activity Theory as an analytical framework, we examined collaborative activities, challenges, and strategies to bridge interdisciplinary gaps. Our findings revealed significant knowledge boundaries and terminological barriers between SLPs and NLP researchers when both groups relied on clinical data as boundary objects to facilitate collaboration, although this approach has limitations. We highlight the potential opportunities of AI technologies as knowledge brokers to overcome interdisciplinary collaboration challenges.<|reference_end|>
arxiv
@article{yao2024exploring, title={Exploring Interdisciplinary Team Collaboration in Clinical NLP Projects Through the Lens of Activity Theory}, author={Bingsheng Yao, Yao Du, Yue Fu, Xuhai Xu, Yanjun Gao, Hong Yu, Dakuo Wang}, journal={arXiv preprint arXiv:2410.00174}, year={2024}, archivePrefix={arXiv}, eprint={2410.00174}, primaryClass={cs.HC} }
yao2024exploring
arxiv-663824
2410.00175
Adaptable Moral Stances of Large Language Models on Sexist Content: Implications for Society and Gender Discourse
<|reference_start|>Adaptable Moral Stances of Large Language Models on Sexist Content: Implications for Society and Gender Discourse: This work provides an explanatory view of how LLMs can apply moral reasoning to both criticize and defend sexist language. We assessed eight large language models, all of which demonstrated the capability to provide explanations grounded in varying moral perspectives for both critiquing and endorsing views that reflect sexist assumptions. With both human and automatic evaluation, we show that all eight models produce comprehensible and contextually relevant text, which is helpful in understanding diverse views on how sexism is perceived. Also, through analysis of moral foundations cited by LLMs in their arguments, we uncover the diverse ideological perspectives in models' outputs, with some models aligning more with progressive or conservative views on gender roles and sexism. Based on our observations, we caution against the potential misuse of LLMs to justify sexist language. We also highlight that LLMs can serve as tools for understanding the roots of sexist beliefs and designing well-informed interventions. Given this dual capacity, it is crucial to monitor LLMs and design safety mechanisms for their use in applications that involve sensitive societal topics, such as sexism.<|reference_end|>
arxiv
@article{guo2024adaptable, title={Adaptable Moral Stances of Large Language Models on Sexist Content: Implications for Society and Gender Discourse}, author={Rongchen Guo, Isar Nejadgholi, Hillary Dawkins, Kathleen C. Fraser, Svetlana Kiritchenko}, journal={arXiv preprint arXiv:2410.00175}, year={2024}, archivePrefix={arXiv}, eprint={2410.00175}, primaryClass={cs.CL} }
guo2024adaptable
arxiv-663825
2410.00178
Streaming Data in HPC Workflows Using ADIOS
<|reference_start|>Streaming Data in HPC Workflows Using ADIOS: The "IO Wall" problem, in which the gap between computation rate and data access rate grows continuously, poses significant problems to scientific workflows which have traditionally relied upon using the filesystem for intermediate storage between workflow stages. One way to avoid this problem in scientific workflows is to stream data directly from producers to consumers and avoiding storage entirely. However, the manner in which this is accomplished is key to both performance and usability. This paper presents the Sustainable Staging Transport, an approach which allows direct streaming between traditional file writers and readers with few application changes. SST is an ADIOS "engine", accessible via standard ADIOS APIs, and because ADIOS allows engines to be chosen at run-time, many existing file-oriented ADIOS workflows can utilize SST for direct application-to-application communication without any source code changes. This paper describes the design of SST and presents performance results from various applications that use SST, for feeding model training with simulation data with substantially higher bandwidth than the theoretical limits of Frontier's file system, for strong coupling of separately developed applications for multiphysics multiscale simulation, or for in situ analysis and visualization of data to complete all data processing shortly after the simulation finishes.<|reference_end|>
arxiv
@article{eisenhauer2024streaming, title={Streaming Data in HPC Workflows Using ADIOS}, author={Greg Eisenhauer, Norbert Podhorszki, Ana Gainaru, Scott Klasky, Philip E. Davis, Manish Parashar, Matthew Wolf, Eric Suchtya, Erick Fredj, Vicente Bolea, Franz P"oschel, Klaus Steiniger, Michael Bussmann, Richard Pausch, Sunita Chandrasekaran}, journal={arXiv preprint arXiv:2410.00178}, year={2024}, archivePrefix={arXiv}, eprint={2410.00178}, primaryClass={cs.PF} }
eisenhauer2024streaming
arxiv-663826
2410.00179
Evaluating the fairness of task-adaptive pretraining on unlabeled test data before few-shot text classification
<|reference_start|>Evaluating the fairness of task-adaptive pretraining on unlabeled test data before few-shot text classification: Few-shot learning benchmarks are critical for evaluating modern NLP techniques. It is possible, however, that benchmarks favor methods which easily make use of unlabeled text, because researchers can use unlabeled text from the test set to pretrain their models. Given the dearth of research on this potential problem, we run experiments to quantify the bias caused by pretraining on unlabeled test set text instead of on unlabeled, independently drawn text. Controlled few-shot and zero-shot experiments on 25 classification tasks and 3 language models -- BERT, GPT-2, and Mistral 7B -- do not find evidence of overoptimism. Furthermore, we demonstrate the importance of repeated subsampling when studying few-shot text classification, and recommend that few-shot learning benchmarks include multiple training folds. Code and data are available at https://github.com/kddubey/pretrain-on-test/.<|reference_end|>
arxiv
@article{dubey2024evaluating, title={Evaluating the fairness of task-adaptive pretraining on unlabeled test data before few-shot text classification}, author={Kush Dubey}, journal={arXiv preprint arXiv:2410.00179}, year={2024}, archivePrefix={arXiv}, eprint={2410.00179}, primaryClass={cs.CL cs.LG} }
dubey2024evaluating
arxiv-663827
2410.00181
Analysis of human steering behavior differences in human-in-control and autonomy-in-control driving
<|reference_start|>Analysis of human steering behavior differences in human-in-control and autonomy-in-control driving: Steering models (such as the generalized two-point model) predict human steering behavior well when the human is in direct control of a vehicle. In vehicles under autonomous control, human control inputs are not used; rather, an autonomous controller applies steering and acceleration commands to the vehicle. For example, human steering input may be used for state estimation rather than direct control. We show that human steering behavior changes when the human no longer directly controls the vehicle and the two are instead working in a shared autonomy paradigm. Thus, when a vehicle is not under direct human control, steering models like the generalized two-point model do not predict human steering behavior. We also show that the error between predicted human steering behavior and actual human steering behavior reflects a fundamental difference when the human directly controls the vehicle compared to when the vehicle is autonomously controlled. Moreover, we show that a single distribution describes the error between predicted human steering behavior and actual human steering behavior when the human's steering inputs are used for state estimation and the vehicle is autonomously controlled, indicating there may be a underlying model for human steering behavior under this type of shared autonomous control. Future work includes determining this shared autonomous human steering model and demonstrating its performance.<|reference_end|>
arxiv
@article{mai2024analysis, title={Analysis of human steering behavior differences in human-in-control and autonomy-in-control driving}, author={Rene Mai, Agung Julius, Sandipan Mishra}, journal={arXiv preprint arXiv:2410.00181}, year={2024}, archivePrefix={arXiv}, eprint={2410.00181}, primaryClass={eess.SY cs.HC cs.SY} }
mai2024analysis
arxiv-663828
2410.00182
Zero-Shot Classification of Crisis Tweets Using Instruction-Finetuned Large Language Models
<|reference_start|>Zero-Shot Classification of Crisis Tweets Using Instruction-Finetuned Large Language Models: Social media posts are frequently identified as a valuable source of open-source intelligence for disaster response, and pre-LLM NLP techniques have been evaluated on datasets of crisis tweets. We assess three commercial large language models (OpenAI GPT-4o, Gemini 1.5-flash-001 and Anthropic Claude-3-5 Sonnet) capabilities in zero-shot classification of short social media posts. In one prompt, the models are asked to perform two classification tasks: 1) identify if the post is informative in a humanitarian context; and 2) rank and provide probabilities for the post in relation to 16 possible humanitarian classes. The posts being classified are from the consolidated crisis tweet dataset, CrisisBench. Results are evaluated using macro, weighted, and binary F1-scores. The informative classification task, generally performed better without extra information, while for the humanitarian label classification providing the event that occurred during which the tweet was mined, resulted in better performance. Further, we found that the models have significantly varying performance by dataset, which raises questions about dataset quality.<|reference_end|>
arxiv
@article{mcdaniel2024zero-shot, title={Zero-Shot Classification of Crisis Tweets Using Instruction-Finetuned Large Language Models}, author={Emma McDaniel, Samuel Scheele, Jeff Liu}, journal={arXiv preprint arXiv:2410.00182}, year={2024}, archivePrefix={arXiv}, eprint={2410.00182}, primaryClass={cs.CL cs.AI} }
mcdaniel2024zero-shot
arxiv-663829
2410.00184
Volumetric Conditional Score-based Residual Diffusion Model for PET/MR Denoising
<|reference_start|>Volumetric Conditional Score-based Residual Diffusion Model for PET/MR Denoising: PET imaging is a powerful modality offering quantitative assessments of molecular and physiological processes. The necessity for PET denoising arises from the intrinsic high noise levels in PET imaging, which can significantly hinder the accurate interpretation and quantitative analysis of the scans. With advances in deep learning techniques, diffusion model-based PET denoising techniques have shown remarkable performance improvement. However, these models often face limitations when applied to volumetric data. Additionally, many existing diffusion models do not adequately consider the unique characteristics of PET imaging, such as its 3D volumetric nature, leading to the potential loss of anatomic consistency. Our Conditional Score-based Residual Diffusion (CSRD) model addresses these issues by incorporating a refined score function and 3D patch-wise training strategy, optimizing the model for efficient volumetric PET denoising. The CSRD model significantly lowers computational demands and expedites the denoising process. By effectively integrating volumetric data from PET and MRI scans, the CSRD model maintains spatial coherence and anatomical detail. Lastly, we demonstrate that the CSRD model achieves superior denoising performance in both qualitative and quantitative evaluations while maintaining image details and outperforms existing state-of-the-art methods.<|reference_end|>
arxiv
@article{yoon2024volumetric, title={Volumetric Conditional Score-based Residual Diffusion Model for PET/MR Denoising}, author={Siyeop Yoon, Rui Hu, Yuang Wang, Matthew Tivnan, Young-don Son, Dufan Wu, Xiang Li, Kyungsang Kim, and Quanzheng Li}, journal={arXiv preprint arXiv:2410.00184}, year={2024}, archivePrefix={arXiv}, eprint={2410.00184}, primaryClass={eess.IV cs.CV cs.LG} }
yoon2024volumetric
arxiv-663830
2410.00185
The Patterns of Life Human Mobility Simulation
<|reference_start|>The Patterns of Life Human Mobility Simulation: We demonstrate the Patterns of Life Simulation to create realistic simulations of human mobility in a city. This simulation has recently been used to generate massive amounts of trajectory and check-in data. Our demonstration focuses on using the simulation twofold: (1) using the graphical user interface (GUI), and (2) running the simulation headless by disabling the GUI for faster data generation. We further demonstrate how the Patterns of Life simulation can be used to simulate any region on Earth by using publicly available data from OpenStreetMap. Finally, we also demonstrate recent improvements to the scalability of the simulation allows simulating up to 100,000 individual agents for years of simulation time. During our demonstration, as well as offline using our guides on GitHub, participants will learn: (1) The theories of human behavior driving the Patters of Life simulation, (2) how to simulate to generate massive amounts of synthetic yet realistic trajectory data, (3) running the simulation for a region of interest chosen by participants using OSM data, (4) learn the scalability of the simulation and understand the properties of generated data, and (5) manage thousands of parallel simulation instances running concurrently.<|reference_end|>
arxiv
@article{amiri2024the, title={The Patterns of Life Human Mobility Simulation}, author={Hossein Amiri, Will Kohn, Shiyang Ruan, Joon-Seok Kim, Hamdi Kavak, Andrew Crooks, Dieter Pfoser, Carola Wenk, and Andreas Zufle}, journal={arXiv preprint arXiv:2410.00185}, year={2024}, archivePrefix={arXiv}, eprint={2410.00185}, primaryClass={cs.MA cs.HC} }
amiri2024the
arxiv-663831
2410.00192
Large-scale, Longitudinal, Hybrid Participatory Design Program to Create Navigation Technology for the Blind
<|reference_start|>Large-scale, Longitudinal, Hybrid Participatory Design Program to Create Navigation Technology for the Blind: Empowering people who are blind or visually impaired (BVI) to enhance their orientation and mobility skills is critical to equalizing their access to social and economic opportunities. To manage this crucial challenge, we employed a novel design process based on a large-scale, longitudinal, community-based structure. Across three annual programs we engaged with the BVI community in online and in-person modes. In total, our team included 67 total BVI participatory design participants online, 11 BVI co-designers in-person, and 4 BVI program coordinators. Through this design process we built a mobile application that enables users to generate, share, and navigate maps of indoor and outdoor environments without the need to instrument each environment with beacons or fiducial markers. We evaluated this app at a healthcare facility, and participants in the evaluation rated the app highly with respect to its design, features, and potential for positive impact on quality of life.<|reference_end|>
arxiv
@article{chung2024large-scale,, title={Large-scale, Longitudinal, Hybrid Participatory Design Program to Create Navigation Technology for the Blind}, author={Daeun Joyce Chung, Muya Guoji, Nina Mindel, Alexis Malkin, Fernando Alberotrio, Shane Lowe, Chris McNally, Casandra Xavier, Paul Ruvolo}, journal={arXiv preprint arXiv:2410.00192}, year={2024}, archivePrefix={arXiv}, eprint={2410.00192}, primaryClass={cs.HC} }
chung2024large-scale,
arxiv-663832
2410.00193
Do Vision-Language Models Really Understand Visual Language?
<|reference_start|>Do Vision-Language Models Really Understand Visual Language?: Visual language is a system of communication that conveys information through symbols, shapes, and spatial arrangements. Diagrams are a typical example of a visual language depicting complex concepts and their relationships in the form of an image. The symbolic nature of diagrams presents significant challenges for building models capable of understanding them. Yet, recent studies seem to suggest that Large Vision-Language Models (LVLMs) can even tackle complex reasoning tasks involving diagrams. In this paper, we investigate this phenomenon by developing a comprehensive test suite to evaluate the diagram comprehension capability of LVLMs. Our test suite uses a variety of questions focused on concept entities and their relationships over a set of synthetic as well as real diagrams across several domains to evaluate the recognition and reasoning abilities of models. Our evaluation of three LVLMs (GPT-4V, GPT-4o, and Gemini) shows that while these models can accurately identify and reason about entities, their ability to understand relationships is notably limited. Further testing reveals that the decent performance on diagram understanding largely stems from leveraging their background knowledge as shortcuts to identify and reason about the relational information. Thus, we conclude that LVLMs have a limited capability for genuine diagram understanding, and their impressive performance in diagram reasoning is an illusion emanating from other confounding factors, such as the background knowledge in the models.<|reference_end|>
arxiv
@article{giledereli2024do, title={Do Vision-Language Models Really Understand Visual Language?}, author={Buse Giledereli, Yifan Hou, Yilei Tu, Mrinmaya Sachan}, journal={arXiv preprint arXiv:2410.00193}, year={2024}, archivePrefix={arXiv}, eprint={2410.00193}, primaryClass={cs.CL cs.CV} }
giledereli2024do
arxiv-663833
2410.00194
"Real Learner Data Matters" Exploring the Design of LLM-Powered Question Generation for Deaf and Hard of Hearing Learners
<|reference_start|>"Real Learner Data Matters" Exploring the Design of LLM-Powered Question Generation for Deaf and Hard of Hearing Learners: Deaf and Hard of Hearing (DHH) learners face unique challenges in learning environments, often due to a lack of tailored educational materials that address their specific needs. This study explores the potential of Large Language Models (LLMs) to generate personalized quiz questions to enhance DHH students' video-based learning experiences. We developed a prototype leveraging LLMs to generate questions with emphasis on two unique strategies: Visual Questions, which identify video segments where visual information might be misrepresented, and Emotion Questions, which highlight moments where previous DHH learners experienced learning difficulty manifested in emotional responses. Through user studies with DHH undergraduates, we evaluated the effectiveness of these LLM-generated questions in supporting the learning experience. Our findings indicate that while LLMs offer significant potential for personalized learning, challenges remain in the interaction accessibility for the diverse DHH community. The study highlights the importance of considering language diversity and culture in LLM-based educational technology design.<|reference_end|>
arxiv
@article{cheng2024"real, title={"Real Learner Data Matters" Exploring the Design of LLM-Powered Question Generation for Deaf and Hard of Hearing Learners}, author={Si Cheng, Shuxu Huffman, Qingxiaoyang Zhu, Haotian Su, Raja Kushalnagar, Qi Wang}, journal={arXiv preprint arXiv:2410.00194}, year={2024}, archivePrefix={arXiv}, eprint={2410.00194}, primaryClass={cs.HC} }
cheng2024"real
arxiv-663834
2410.00196
Motion Design Principles for Accessible Video-based Learning: Addressing Cognitive Challenges for Deaf and Hard of Hearing Learners
<|reference_start|>Motion Design Principles for Accessible Video-based Learning: Addressing Cognitive Challenges for Deaf and Hard of Hearing Learners: Deaf and Hard-of-Hearing (DHH) learners face unique challenges in video-based learning due to the complex interplay between visual and auditory information in videos. Traditional approaches to making video content accessible primarily focus on captioning, but these solutions often neglect the cognitive demands of processing both visual and textual information simultaneously. This paper introduces a set of \textit{Motion} design guidelines, aimed at mitigating these cognitive challenges and improving video learning experiences for DHH learners. Through a two-phase research, we identified five key challenges, including misaligned content and visual overload. We proposed five design principles accordingly. User study with 16 DHH participants showed that improving visual-audio relevance and guiding visual attention significantly enhances the learning experience by reducing physical demand, alleviating temporal pressure, and improving learning satisfaction. Our findings highlight the potential of Motion design to transform educational content for DHH learners, and we discuss implications for inclusive video learning tools.<|reference_end|>
arxiv
@article{cheng2024motion, title={Motion Design Principles for Accessible Video-based Learning: Addressing Cognitive Challenges for Deaf and Hard of Hearing Learners}, author={Si Cheng, Haocong Cheng, Suzy Su, Lu Ming, Sarah Masud, Qi Wang, Yun Huang}, journal={arXiv preprint arXiv:2410.00196}, year={2024}, archivePrefix={arXiv}, eprint={2410.00196}, primaryClass={cs.HC} }
cheng2024motion
arxiv-663835
2410.00199
Inclusive Emotion Technologies: Addressing the Needs of d/Deaf and Hard of Hearing Learners in Video-Based Learning
<|reference_start|>Inclusive Emotion Technologies: Addressing the Needs of d/Deaf and Hard of Hearing Learners in Video-Based Learning: Accessibility efforts for d/Deaf and hard of hearing (DHH) learners in video-based learning have mainly focused on captions and interpreters, with limited attention to learners' emotional awareness--an important yet challenging skill for effective learning. Current emotion technologies are designed to support learners' emotional awareness and social needs; however, little is known about whether and how DHH learners could benefit from these technologies. Our study explores how DHH learners perceive and use emotion data from two collection approaches, self-reported and automatic emotion recognition (AER), in video-based learning. By comparing the use of these technologies between DHH (N=20) and hearing learners (N=20), we identified key differences in their usage and perceptions: 1) DHH learners enhanced their emotional awareness by rewatching the video to self-report their emotions and called for alternative methods for self-reporting emotion, such as using sign language or expressive emoji designs; and 2) while the AER technology could be useful for detecting emotional patterns in learning experiences, DHH learners expressed more concerns about the accuracy and intrusiveness of the AER data. Our findings provide novel design implications for improving the inclusiveness of emotion technologies to support DHH learners, such as leveraging DHH peer learners' emotions to elicit reflections.<|reference_end|>
arxiv
@article{chen2024inclusive, title={Inclusive Emotion Technologies: Addressing the Needs of d/Deaf and Hard of Hearing Learners in Video-Based Learning}, author={Si Chen, Jason Situ, Haocong Cheng, Suzy Su, Desiree Kirst, Lu Ming, Qi Wang, Lawrence Angrave, Yun Huang}, journal={arXiv preprint arXiv:2410.00199}, year={2024}, archivePrefix={arXiv}, eprint={2410.00199}, primaryClass={cs.HC} }
chen2024inclusive
arxiv-663836
2410.00201
DreamStruct: Understanding Slides and User Interfaces via Synthetic Data Generation
<|reference_start|>DreamStruct: Understanding Slides and User Interfaces via Synthetic Data Generation: Enabling machines to understand structured visuals like slides and user interfaces is essential for making them accessible to people with disabilities. However, achieving such understanding computationally has required manual data collection and annotation, which is time-consuming and labor-intensive. To overcome this challenge, we present a method to generate synthetic, structured visuals with target labels using code generation. Our method allows people to create datasets with built-in labels and train models with a small number of human-annotated examples. We demonstrate performance improvements in three tasks for understanding slides and UIs: recognizing visual elements, describing visual content, and classifying visual content types.<|reference_end|>
arxiv
@article{peng2024dreamstruct:, title={DreamStruct: Understanding Slides and User Interfaces via Synthetic Data Generation}, author={Yi-Hao Peng, Faria Huq, Yue Jiang, Jason Wu, Amanda Xin Yue Li, Jeffrey Bigham, Amy Pavel}, journal={arXiv preprint arXiv:2410.00201}, year={2024}, archivePrefix={arXiv}, eprint={2410.00201}, primaryClass={cs.CV cs.CL} }
peng2024dreamstruct:
arxiv-663837
2410.00202
Spectral Element Simulation of Liquid Metal Magnetohydrodynamics
<|reference_start|>Spectral Element Simulation of Liquid Metal Magnetohydrodynamics: A spectral-element-based formulation of incompressible MHD is presented in the context of the open-source fluid-thermal code, Nek5000/RS. The formulation supports magnetic fields in a solid domain that surrounds the fluid domain. Several steady-state and time-transient model problems are presented as part of the code verification process. Nek5000/RS is designed for large-scale turbulence simulations, which will be the next step with this new MHD capability.<|reference_end|>
arxiv
@article{guo2024spectral, title={Spectral Element Simulation of Liquid Metal Magnetohydrodynamics}, author={Yichen Guo, Paul Fischer, and Misun Min}, journal={arXiv preprint arXiv:2410.00202}, year={2024}, archivePrefix={arXiv}, eprint={2410.00202}, primaryClass={cs.CE} }
guo2024spectral
arxiv-663838
2410.00203
Multilevel Picard approximations overcome the curse of dimensionality when approximating semilinear heat equations with gradient-dependent nonlinearities in $L^p$-sense
<|reference_start|>Multilevel Picard approximations overcome the curse of dimensionality when approximating semilinear heat equations with gradient-dependent nonlinearities in $L^p$-sense: We prove that multilevel Picard approximations are capable of approximating solutions of semilinear heat equations in $L^{p}$-sense, ${p}\in [2,\infty)$, in the case of gradient-dependent, Lipschitz-continuous nonlinearities, in the sense that the computational effort of the multilevel Picard approximations grow at most polynomially in both the dimension $d$ and the reciprocal $1/\epsilon$ of the prescribed accuracy $\epsilon$.<|reference_end|>
arxiv
@article{nguyen2024multilevel, title={Multilevel Picard approximations overcome the curse of dimensionality when approximating semilinear heat equations with gradient-dependent nonlinearities in $L^p$-sense}, author={Tuan Anh Nguyen}, journal={arXiv preprint arXiv:2410.00203}, year={2024}, archivePrefix={arXiv}, eprint={2410.00203}, primaryClass={math.NA cs.NA math.AP} }
nguyen2024multilevel
arxiv-663839
2410.00204
OpenAnimals: Revisiting Person Re-Identification for Animals Towards Better Generalization
<|reference_start|>OpenAnimals: Revisiting Person Re-Identification for Animals Towards Better Generalization: This paper addresses the challenge of animal re-identification, an emerging field that shares similarities with person re-identification but presents unique complexities due to the diverse species, environments and poses. To facilitate research in this domain, we introduce OpenAnimals, a flexible and extensible codebase designed specifically for animal re-identification. We conduct a comprehensive study by revisiting several state-of-the-art person re-identification methods, including BoT, AGW, SBS, and MGN, and evaluate their effectiveness on animal re-identification benchmarks such as HyenaID, LeopardID, SeaTurtleID, and WhaleSharkID. Our findings reveal that while some techniques generalize well, many do not, underscoring the significant differences between the two tasks. To bridge this gap, we propose ARBase, a strong \textbf{Base} model tailored for \textbf{A}nimal \textbf{R}e-identification, which incorporates insights from extensive experiments and introduces simple yet effective animal-oriented designs. Experiments demonstrate that ARBase consistently outperforms existing baselines, achieving state-of-the-art performance across various benchmarks.<|reference_end|>
arxiv
@article{hou2024openanimals:, title={OpenAnimals: Revisiting Person Re-Identification for Animals Towards Better Generalization}, author={Saihui Hou, Panjian Huang, Zengbin Wang, Yuan Liu, Zeyu Li, Man Zhang, Yongzhen Huang}, journal={arXiv preprint arXiv:2410.00204}, year={2024}, archivePrefix={arXiv}, eprint={2410.00204}, primaryClass={cs.CV} }
hou2024openanimals:
arxiv-663840
2410.00206
FPT Algorithms for Crossing Number Problems: A Unified Approach
<|reference_start|>FPT Algorithms for Crossing Number Problems: A Unified Approach: The basic crossing number problem is to determine the minimum number of crossings in a topological drawing of an input graph in the plane. In this paper, we develop fixed-parameter tractable (FPT) algorithms for various generalized crossing number problems in the plane or on surfaces. Our first result is on the color-constrained crossing problem, in which edges of the input graph G are colored, and one looks for a drawing of G in the plane or on a given surface in which the total number of crossings involving edges of colors i and j does not exceed a given upper bound Mij. We give an algorithm for this problem that is FPT in the total number of crossings allowed and the genus of the surface. It readily implies an FPT algorithm for the joint crossing number problem. We also give new FPT algorithms for several other graph drawing problems, such as the skewness, the edge crossing number, the splitting number, the gap-planar crossing number, and their generalizations to surfaces. Our algorithms are reductions to the embeddability of a graph on a two-dimensional simplicial complex, which admits an FPT algorithm by a result of Colin de Verdi\`ere and Magnard [ESA 2021].<|reference_end|>
arxiv
@article{de verdière2024fpt, title={FPT Algorithms for Crossing Number Problems: A Unified Approach}, author={{\'E}ric Colin de Verdi{\`e}re and Petr Hlin{\v{e}}n{\'y}}, journal={arXiv preprint arXiv:2410.00206}, year={2024}, archivePrefix={arXiv}, eprint={2410.00206}, primaryClass={cs.CG math.CO} }
de verdière2024fpt
arxiv-663841
2410.00207
Evaluating the performance of state-of-the-art esg domain-specific pre-trained large language models in text classification against existing models and traditional machine learning techniques
<|reference_start|>Evaluating the performance of state-of-the-art esg domain-specific pre-trained large language models in text classification against existing models and traditional machine learning techniques: This research investigates the classification of Environmental, Social, and Governance (ESG) information within textual disclosures. The aim is to develop and evaluate binary classification models capable of accurately identifying and categorizing E, S and G-related content respectively. The motivation for this research stems from the growing importance of ESG considerations in investment decisions and corporate accountability. Accurate and efficient classification of ESG information is crucial for stakeholders to understand the impact of companies on sustainability and to make informed decisions. The research uses a quantitative approach involving data collection, data preprocessing, and the development of ESG-focused Large Language Models (LLMs) and traditional machine learning (Support Vector Machines, XGBoost) classifiers. Performance evaluation guides iterative refinement until satisfactory metrics are achieved. The research compares traditional machine learning techniques (Support Vector Machines, XGBoost), state-of-the-art language model (FinBERT-ESG) and fine-tuned LLMs like Llama 2, by employing standard Natural Language Processing performance metrics such as accuracy, precision, recall, F1-score. A novel fine-tuning method, Qlora, is applied to LLMs, resulting in significant performance improvements across all ESG domains. The research also develops domain-specific fine-tuned models, such as EnvLlama 2-Qlora, SocLlama 2-Qlora, and GovLlama 2-Qlora, which demonstrate impressive results in ESG text classification.<|reference_end|>
arxiv
@article{chung2024evaluating, title={Evaluating the performance of state-of-the-art esg domain-specific pre-trained large language models in text classification against existing models and traditional machine learning techniques}, author={Tin Yuet Chung and Majid Latifi}, journal={arXiv preprint arXiv:2410.00207}, year={2024}, archivePrefix={arXiv}, eprint={2410.00207}, primaryClass={cs.CL} }
chung2024evaluating
arxiv-663842
2410.00208
A Data-Driven Approach To Preserve Safety and Reference Tracking for Constrained Cyber-Physical Systems Under Network Attacks
<|reference_start|>A Data-Driven Approach To Preserve Safety and Reference Tracking for Constrained Cyber-Physical Systems Under Network Attacks: This paper proposes a worst-case data-driven control architecture capable of ensuring the safety of constrained Cyber-Physical Systems under cyber-attacks while minimizing, whenever possible, potential degradation in tracking performance. To this end, a data-driven robust anomaly detector is designed to detect cyber-attack occurrences. Moreover, an add-on tracking supervisor module allows safe open-loop tracking control operations in case of unreliable measurements. On the plant side, a safety verification module and a local emergency controller are designed to manage severe attack scenarios that cannot be handled on the controller's side. These two modules resort to worst-case reachability and controllability data-driven arguments to detect potential unsafe scenarios and replace, whenever strictly needed, the tracking controller with emergency actions whose objective is to steer the plant's state trajectory in a predefined set of admissible and safe robust control invariant region until an attack-free scenario is restored. The effectiveness of the proposed solution has been shown through a simulation example.<|reference_end|>
arxiv
@article{attar2024a, title={A Data-Driven Approach To Preserve Safety and Reference Tracking for Constrained Cyber-Physical Systems Under Network Attacks}, author={Mehran Attar and Walter Lucia}, journal={arXiv preprint arXiv:2410.00208}, year={2024}, archivePrefix={arXiv}, eprint={2410.00208}, primaryClass={eess.SY cs.SY math.OC} }
attar2024a
arxiv-663843
2410.00209
Closed Repeats
<|reference_start|>Closed Repeats: Much research in stringology focuses on structures that can, in a way, ``grasp'' repeats (substrings that occur multiple times) as, for example, the so-called runs, a.k.a. maximal repetitions, compactly describe all tandem repeats. In this paper we introduce closed repeats: given a string $s$, its non-empty substring $s[i\,..\,j]$ is a right (left) closed repeat if its closest occurrence $s[i'\,..\,j']$ with $i' > i$ cannot be ``extended'' to the right (respectively, left) matching $s[j{+}1] = s[j'{+}1]$ (respectively, $s[i{-}1] = s[i'{-}1]$); the repeat is closed if it is both left and right closed. We note that the closed repeats correspond to the maximal closed substrings recently proposed by Badkobeh et al. and they include all runs as a special case. We prove that the number of right/left closed repeats is $O(n \log n)$, where $n$ is the length of $s$, and we show that this bound is tight. The (right/left) closed repeats can be computed in the optimal time $O(n\log n)$; as we prove, the computation time cannot be lower than $\Omega(n\log\sigma)$ over a general ordered alphabet of size $\sigma$ even when the number of the closed repeats is $O(n)$. As an application, we describe data structures using the closed repeats for a number of substring queries: finding the period of the substring provided it is ``periodic'', finding the longest repeat in the substring, computing the rightmost LZ77 parsing of the substring.<|reference_end|>
arxiv
@article{kosolobov2024closed, title={Closed Repeats}, author={Dmitry Kosolobov}, journal={arXiv preprint arXiv:2410.00209}, year={2024}, archivePrefix={arXiv}, eprint={2410.00209}, primaryClass={cs.DS} }
kosolobov2024closed
arxiv-663844
2410.00210
End-to-end Piano Performance-MIDI to Score Conversion with Transformers
<|reference_start|>End-to-end Piano Performance-MIDI to Score Conversion with Transformers: The automated creation of accurate musical notation from an expressive human performance is a fundamental task in computational musicology. To this end, we present an end-to-end deep learning approach that constructs detailed musical scores directly from real-world piano performance-MIDI files. We introduce a modern transformer-based architecture with a novel tokenized representation for symbolic music data. Framing the task as sequence-to-sequence translation rather than note-wise classification reduces alignment requirements and annotation costs, while allowing the prediction of more concise and accurate notation. To serialize symbolic music data, we design a custom tokenization stage based on compound tokens that carefully quantizes continuous values. This technique preserves more score information while reducing sequence lengths by $3.5\times$ compared to prior approaches. Using the transformer backbone, our method demonstrates better understanding of note values, rhythmic structure, and details such as staff assignment. When evaluated end-to-end using transcription metrics such as MUSTER, we achieve significant improvements over previous deep learning approaches and complex HMM-based state-of-the-art pipelines. Our method is also the first to directly predict notational details like trill marks or stem direction from performance data. Code and models are available at https://github.com/TimFelixBeyer/MIDI2ScoreTransformer<|reference_end|>
arxiv
@article{beyer2024end-to-end, title={End-to-end Piano Performance-MIDI to Score Conversion with Transformers}, author={Tim Beyer, Angela Dai}, journal={arXiv preprint arXiv:2410.00210}, year={2024}, archivePrefix={arXiv}, eprint={2410.00210}, primaryClass={cs.SD cs.LG eess.AS} }
beyer2024end-to-end
arxiv-663845
2410.00212
Transient subtraction: A control variate method for computing transport coefficients
<|reference_start|>Transient subtraction: A control variate method for computing transport coefficients: In molecular dynamics, transport coefficients measure the sensitivity of the invariant probability measure of the stochastic dynamics at hand with respect to some perturbation. They are typically computed using either the linear response of nonequilibrium dynamics, or the Green--Kubo formula. The estimators for both approaches have large variances, which motivates the study of variance reduction techniques for computing transport coefficients. We present an alternative approach, called the \emph{transient subtraction technique} (inspired by early work by Ciccotti and Jaccucci in 1975), which amounts to simulating a transient dynamics, from which we subtract a sensibly coupled equilibrium trajectory, resulting in an estimator with smaller variance. We present the mathematical formulation of the transient subtraction technique, give error estimates on the bias and variance of the associated estimator, and demonstrate the relevance of the method through numerical illustrations for various systems.<|reference_end|>
arxiv
@article{monmarché2024transient, title={Transient subtraction: A control variate method for computing transport coefficients}, author={Pierre Monmarch{\'e}, Renato Spacek, Gabriel Stoltz}, journal={arXiv preprint arXiv:2410.00212}, year={2024}, archivePrefix={arXiv}, eprint={2410.00212}, primaryClass={math.NA cond-mat.stat-mech cs.NA} }
monmarché2024transient
arxiv-663846
2410.00215
Characterizing and Efficiently Accelerating Multimodal Generation Model Inference
<|reference_start|>Characterizing and Efficiently Accelerating Multimodal Generation Model Inference: Generative artificial intelligence (AI) technology is revolutionizing the computing industry. Not only its applications have broadened to various sectors but also poses new system design and optimization opportunities. The technology is capable of understanding and responding in multiple modalities. However, the advanced capability currently comes with significant system resource demands. To sustainably scale generative AI capabilities to billions of users in the world, inference must be fast and efficient. This paper pinpoints key system design and optimization opportunities by characterizing a family of emerging multi-modal generation models on real systems. Auto-regressive token generation is a critical latency performance bottleneck, typically dominated by GPU idle time. In addition to memory-intensive attention across the generative AI models, linear operations constitute significant inference latency due to the feed forward networks in Transformer-based models. We demonstrate that state-of-the-art optimization levers, spanning from applications to system software and hardware, set a 3.88x better baseline.<|reference_end|>
arxiv
@article{lee2024characterizing, title={Characterizing and Efficiently Accelerating Multimodal Generation Model Inference}, author={Yejin Lee, Anna Sun, Basil Hosmer, Bilge Acun, Can Balioglu, Changhan Wang, Charles David Hernandez, Christian Puhrsch, Daniel Haziza, Driss Guessous, Francisco Massa, Jacob Kahn, Jeffrey Wan, Jeremy Reizenstein, Jiaqi Zhai, Joe Isaacson, Joel Schlosser, Juan Pino, Kaushik Ram Sadagopan, Leonid Shamis, Linjian Ma, Min-Jae Hwang, Mingda Chen, Mostafa Elhoushi, Pedro Rodriguez, Ram Pasunuru, Scott Yih, Sravya Popuri, Xing Liu, and Carole-Jean Wu}, journal={arXiv preprint arXiv:2410.00215}, year={2024}, archivePrefix={arXiv}, eprint={2410.00215}, primaryClass={cs.LG} }
lee2024characterizing
arxiv-663847
2410.00218
T-KAER: Transparency-enhanced Knowledge-Augmented Entity Resolution Framework
<|reference_start|>T-KAER: Transparency-enhanced Knowledge-Augmented Entity Resolution Framework: Entity resolution (ER) is the process of determining whether two representations refer to the same real-world entity and plays a crucial role in data curation and data cleaning. Recent studies have introduced the KAER framework, aiming to improve pre-trained language models by augmenting external knowledge. However, identifying and documenting the external knowledge that is being augmented and understanding its contribution to the model's predictions have received little to no attention in the research community. This paper addresses this gap by introducing T-KAER, the Transparency-enhanced Knowledge-Augmented Entity Resolution framework. To enhance transparency, three Transparency-related Questions (T-Qs) have been proposed: T-Q(1): What is the experimental process for matching results based on data inputs? T-Q(2): Which semantic information does KAER augment in the raw data inputs? T-Q(3): Which semantic information of the augmented data inputs influences the predictions? To address the T-Qs, T-KAER is designed to improve transparency by documenting the entity resolution processes in log files. In experiments, a citation dataset is used to demonstrate the transparency components of T-KAER. This demonstration showcases how T-KAER facilitates error analysis from both quantitative and qualitative perspectives, providing evidence on "what" semantic information is augmented and "why" the augmented knowledge influences predictions differently.<|reference_end|>
arxiv
@article{li2024t-kaer:, title={T-KAER: Transparency-enhanced Knowledge-Augmented Entity Resolution Framework}, author={Lan Li, Liri Fang, Yiren Liu, Vetle I. Torvik, and Bertram Ludaescher}, journal={International Journal of Digital Curation 2024}, year={2024}, archivePrefix={arXiv}, eprint={2410.00218}, primaryClass={cs.CL cs.DB} }
li2024t-kaer:
arxiv-663848
2410.00222
Micromanipulation System for Microscale Magnetic Component Alignment and Assembly
<|reference_start|>Micromanipulation System for Microscale Magnetic Component Alignment and Assembly: This paper presents a contact-based micromanipulation system for the alignment and installment of microscale magnets into micro robots and devices. Affixing tweezers to a three degree of freedom micromanipulator allows for precise movement of objects. The use of non-magnetic tweezers permits the assembly of magnetized robots, and a magnetic rotating stage allows multiple magnets to be installed into one device in different orientations. By re-orienting the tweezers on the micromanipulator at defined ninety-degree angles, it is possible to assemble a device with magnets oriented in any direction on XY, XZ, and YZ planes. This system is highly precise and flexible, and can be implemented with minimal custom-made parts, making it ideal for development of new magnetic technologies at the microscale.<|reference_end|>
arxiv
@article{shindell2024micromanipulation, title={Micromanipulation System for Microscale Magnetic Component Alignment and Assembly}, author={Oliver J. Shindell, Aaron C. Davis, and David J. Cappelleri}, journal={arXiv preprint arXiv:2410.00222}, year={2024}, archivePrefix={arXiv}, eprint={2410.00222}, primaryClass={cs.RO} }
shindell2024micromanipulation
arxiv-663849
2410.00223
Koopman Operator in the Weighted Function Spaces and its Learning for the Estimation of Lyapunov and Zubov Functions
<|reference_start|>Koopman Operator in the Weighted Function Spaces and its Learning for the Estimation of Lyapunov and Zubov Functions: The mathematical properties and data-driven learning of the Koopman operator, which represents nonlinear dynamics as a linear mapping on a properly defined functional spaces, have become key problems in nonlinear system identification and control. However, Koopman operators that are approximately learned from snapshot data may not always accurately predict the system evolution on long horizons. In this work, by defining the Koopman operator on a space of weighted continuous functions and learning it on a weighted reproducing kernel Hilbert space, the Koopman operator is guaranteed to be contractive and the accumulation learning error is bounded. The weighting function, assumed to be known a priori, has an exponential decay with the flow or decays exponentially when compensated by an exponential factor. Under such a construction, the Koopman operator learned from data is used to estimate (i) Lyapunov functions for globally asymptotically stable dynamics, and (ii) Zubov-Lyapunov functions that characterize the domain of attraction. For these estimations, probabilistic bounds on the errors are derived.<|reference_end|>
arxiv
@article{tang2024koopman, title={Koopman Operator in the Weighted Function Spaces and its Learning for the Estimation of Lyapunov and Zubov Functions}, author={Wentao Tang}, journal={arXiv preprint arXiv:2410.00223}, year={2024}, archivePrefix={arXiv}, eprint={2410.00223}, primaryClass={eess.SY cs.SY math.DS} }
tang2024koopman
arxiv-663850
2410.00225
Probabilistic Classification of Near-Surface Shallow-Water Sediments using A Portable Free-Fall Penetrometer
<|reference_start|>Probabilistic Classification of Near-Surface Shallow-Water Sediments using A Portable Free-Fall Penetrometer: The geotechnical evaluation of seabed sediments is important for engineering projects and naval applications, offering valuable insights into sediment properties, behavior, and strength. Obtaining high-quality seabed samples can be a challenging task, making in-situ testing an essential part of site characterization. Free Fall Penetrometers (FFP) have emerged as robust tools for rapidly profiling seabed surface sediments, even in energetic nearshore or estuarine conditions and shallow as well as deep depths. While methods for interpretation of traditional offshore Cone Penetration Testing (CPT) data are well-established, their adaptation to FFP data is still an area of research. In this study, we introduce an innovative approach that utilizes machine learning algorithms to create a sediment behavior classification system based on portable free fall penetrometer (PFFP) data. The proposed model leverages PFFP measurements obtained from locations such as Sequim Bay (Washington), the Potomac River, and the York River (Virginia). The result shows 91.1\% accuracy in the class prediction, with the classes representing cohesionless sediment with little to no plasticity, cohesionless sediment with some plasticity, cohesive sediment with low plasticity, and cohesive sediment with high plasticity. The model prediction not only provides the predicted class but also yields an estimate of inherent uncertainty associated with the prediction, which can provide valuable insight about different sediment behaviors. These uncertainties typically range from very low to very high, with lower uncertainties being more common, but they can increase significantly dpending on variations in sediment composition, environmental conditions, and operational techniques. 
By quantifying uncertainty, the model offers a more comprehensive and informed approach to sediment classification.<|reference_end|>
arxiv
@article{rahman2024probabilistic, title={Probabilistic Classification of Near-Surface Shallow-Water Sediments using A Portable Free-Fall Penetrometer}, author={Md Rejwanur Rahman, Adrian Rodriguez-Marek, Nina Stark, Grace Massey, Carl Friedrichs, Kelly M. Dorgan}, journal={arXiv preprint arXiv:2410.00225}, year={2024}, archivePrefix={arXiv}, eprint={2410.00225}, primaryClass={cs.LG stat.AP} }
rahman2024probabilistic
arxiv-663851
2410.00229
Stochastic Inverse Problem: stability, regularization and Wasserstein gradient flow
<|reference_start|>Stochastic Inverse Problem: stability, regularization and Wasserstein gradient flow: Inverse problems in physical or biological sciences often involve recovering an unknown parameter that is random. The sought-after quantity is a probability distribution of the unknown parameter, that produces data that aligns with measurements. Consequently, these problems are naturally framed as stochastic inverse problems. In this paper, we explore three aspects of this problem: direct inversion, variational formulation with regularization, and optimization via gradient flows, drawing parallels with deterministic inverse problems. A key difference from the deterministic case is the space in which we operate. Here, we work within probability space rather than Euclidean or Sobolev spaces, making tools from measure transport theory necessary for the study. Our findings reveal that the choice of metric -- both in the design of the loss function and in the optimization process -- significantly impacts the stability and properties of the optimizer.<|reference_end|>
arxiv
@article{li2024stochastic, title={Stochastic Inverse Problem: stability, regularization and Wasserstein gradient flow}, author={Qin Li, Maria Oprea, Li Wang, Yunan Yang}, journal={arXiv preprint arXiv:2410.00229}, year={2024}, archivePrefix={arXiv}, eprint={2410.00229}, primaryClass={stat.ML cs.LG math.OC math.PR} }
li2024stochastic
arxiv-663852
2410.00231
Helpful DoggyBot: Open-World Object Fetching using Legged Robots and Vision-Language Models
<|reference_start|>Helpful DoggyBot: Open-World Object Fetching using Legged Robots and Vision-Language Models: Learning-based methods have achieved strong performance for quadrupedal locomotion. However, several challenges prevent quadrupeds from learning helpful indoor skills that require interaction with environments and humans: lack of end-effectors for manipulation, limited semantic understanding using only simulation data, and low traversability and reachability in indoor environments. We present a system for quadrupedal mobile manipulation in indoor environments. It uses a front-mounted gripper for object manipulation, a low-level controller trained in simulation using egocentric depth for agile skills like climbing and whole-body tilting, and pre-trained vision-language models (VLMs) with a third-person fisheye and an egocentric RGB camera for semantic understanding and command generation. We evaluate our system in two unseen environments without any real-world data collection or training. Our system can zero-shot generalize to these environments and complete tasks, like following user's commands to fetch a randomly placed stuff toy after climbing over a queen-sized bed, with a 60% success rate. Project website: https://helpful-doggybot.github.io/<|reference_end|>
arxiv
@article{wu2024helpful, title={Helpful DoggyBot: Open-World Object Fetching using Legged Robots and Vision-Language Models}, author={Qi Wu, Zipeng Fu, Xuxin Cheng, Xiaolong Wang, Chelsea Finn}, journal={arXiv preprint arXiv:2410.00231}, year={2024}, archivePrefix={arXiv}, eprint={2410.00231}, primaryClass={cs.RO cs.AI cs.CV cs.LG} }
wu2024helpful
arxiv-663853
2410.00232
Preconditioning for Accelerated Gradient Descent Optimization and Regularization
<|reference_start|>Preconditioning for Accelerated Gradient Descent Optimization and Regularization: Accelerated training algorithms, such as adaptive learning rates and various normalization methods, are widely used but not fully understood. When regularization is introduced, standard optimizers like adaptive learning rates may not perform effectively. This raises the need for alternative regularization approaches and the question of how to properly combine regularization with preconditioning. In this paper, we address these challenges using the theory of preconditioning as follows: (1) We explain how preconditioning with AdaGrad, RMSProp, and Adam accelerates training; (2) We explore the interaction between regularization and preconditioning, outlining different options for selecting the variables for regularization, and in particular we discuss how to implement that for the gradient regularization; and (3) We demonstrate how normalization methods accelerate training by improving Hessian conditioning, and discuss how this perspective can lead to new preconditioning training algorithms. Our findings offer a unified mathematical framework for understanding various acceleration techniques and deriving appropriate regularization schemes.<|reference_end|>
arxiv
@article{ye2024preconditioning, title={Preconditioning for Accelerated Gradient Descent Optimization and Regularization}, author={Qiang Ye}, journal={arXiv preprint arXiv:2410.00232}, year={2024}, archivePrefix={arXiv}, eprint={2410.00232}, primaryClass={cs.LG cs.NA math.NA stat.ML} }
ye2024preconditioning
arxiv-663854
2410.00233
Split Bregman Isotropic and Anisotropic Image Deblurring with Kronecker Product Sum Approximations using Single Precision Enlarged-GKB or RSVD Algorithms to provide low rank truncated SVDs
<|reference_start|>Split Bregman Isotropic and Anisotropic Image Deblurring with Kronecker Product Sum Approximations using Single Precision Enlarged-GKB or RSVD Algorithms to provide low rank truncated SVDs: We consider the solution of the $\ell_1$ regularized image deblurring problem using isotropic and anisotropic regularization implemented with the split Bregman algorithm. For large scale problems, we replace the system matrix $A$ using a Kronecker product approximation obtained via an approximate truncated singular value decomposition for the reordered matrix $\mathcal{R}(A)$. To obtain the approximate decomposition for $\mathcal{R}(A)$ we propose the enlarged Golub Kahan Bidiagonalization algorithm that proceeds by enlarging the Krylov subspace beyond either a given rank for the desired approximation, or uses an automatic stopping test that provides a suitable rank for the approximation. The resultant expansion is contrasted with the use of the truncated and the randomized singular value decompositions with the same number of terms. To further extend the scale of problem that can be considered we implement the determination of the approximation using single precision, while performing all steps for the regularization in standard double precision. The reported numerical tests demonstrate the effectiveness of applying the approximate single precision Kronecker product expansion for $A$, combined with either isotropic or anisotropic regularization implemented using the split Bregman algorithm, for the solution of image deblurring problems. As the size of the problem increases, our results demonstrate that the major costs are associated with determining the Kronecker product approximation, rather than with the cost of the regularization algorithm. Moreover, the enlarged Golub Kahan Bidiagonalization algorithm competes favorably with the randomized singular value decomposition for estimating the approximate singular value decomposition.<|reference_end|>
arxiv
@article{alsubhi2024split, title={Split Bregman Isotropic and Anisotropic Image Deblurring with Kronecker Product Sum Approximations using Single Precision Enlarged-GKB or RSVD Algorithms to provide low rank truncated SVDs}, author={Abdulmajeed Alsubhi and Rosemary Renaut}, journal={arXiv preprint arXiv:2410.00233}, year={2024}, archivePrefix={arXiv}, eprint={2410.00233}, primaryClass={math.NA cs.NA} }
alsubhi2024split
arxiv-663855
2410.00239
Modulation and Coding for NOMA and RSMA
<|reference_start|>Modulation and Coding for NOMA and RSMA: Next-generation multiple access (NGMA) serves as an umbrella term for transmission schemes distinct from conventional orthogonal methods. A key candidate of NGMA, non-orthogonal multiple access (NOMA), emerges as a solution to enhance connectivity by allowing multiple users to share time, frequency, and space concurrently. However, NOMA faces challenges in implementation, particularly in canceling inter-user interference. In this paper, we discuss the principles behind NOMA and review conventional NOMA methods. Then, to address these challenges, we present asynchronous transmission and interference-aware modulation techniques, enabling decoding without successive interference cancellation. The goal is to design constellations that dynamically adapt to interference, minimizing bit error rates (BERs) and enhancing user throughput in the presence of inter-user, inter-carrier, and inter-cell interference. The traditional link between minimizing BER and increasing spectral efficiency is explored, with deep autoencoders for end-to-end communication emerging as a potential solution to improve BERs. Interference-aware modulation can revolutionize constellation design for non-orthogonal channels. Rate-splitting multiple access (RSMA) is another promising interference management technique in multi-user systems. In addition to addressing challenges in finite-alphabet NOMA, this paper offers new insights and provides an overview of code-domain NOMA, trellis-coded NOMA, and RSMA as key NGMA candidates. We also discuss the evolution of channel coding toward low-latency communication and examine modulation and coding schemes in 5G networks. Finally, we highlight future research directions, emphasizing their importance for realizing NOMA from concept to functional technology.<|reference_end|>
arxiv
@article{jafarkhani2024modulation, title={Modulation and Coding for NOMA and RSMA}, author={Hamid Jafarkhani and Hossein Maleki and Mojtaba Vaezi}, journal={arXiv preprint arXiv:2410.00239}, year={2024}, archivePrefix={arXiv}, eprint={2410.00239}, primaryClass={cs.IT cs.LG math.IT} }
jafarkhani2024modulation
arxiv-663856
2410.00240
Demonstrating the Continual Learning Capabilities and Practical Application of Discrete-Time Active Inference
<|reference_start|>Demonstrating the Continual Learning Capabilities and Practical Application of Discrete-Time Active Inference: Active inference is a mathematical framework for understanding how agents (biological or artificial) interact with their environments, enabling continual adaptation and decision-making. It combines Bayesian inference and free energy minimization to model perception, action, and learning in uncertain and dynamic contexts. Unlike reinforcement learning, active inference integrates exploration and exploitation seamlessly by minimizing expected free energy. In this paper, we present a continual learning framework for agents operating in discrete time environments, using active inference as the foundation. We derive the mathematical formulations of variational and expected free energy and apply them to the design of a self-learning research agent. This agent updates its beliefs and adapts its actions based on new data without manual intervention. Through experiments in changing environments, we demonstrate the agent's ability to relearn and refine its models efficiently, making it suitable for complex domains like finance and healthcare. The paper concludes by discussing how the proposed framework generalizes to other systems, positioning active inference as a flexible approach for adaptive AI.<|reference_end|>
arxiv
@article{prakki2024demonstrating, title={Demonstrating the Continual Learning Capabilities and Practical Application of Discrete-Time Active Inference}, author={Rithvik Prakki}, journal={arXiv preprint arXiv:2410.00240}, year={2024}, archivePrefix={arXiv}, eprint={2410.00240}, primaryClass={cs.AI cs.LG} }
prakki2024demonstrating
arxiv-663857
2410.00242
Quantized and Asynchronous Federated Learning
<|reference_start|>Quantized and Asynchronous Federated Learning: Recent advances in federated learning have shown that asynchronous variants can be faster and more scalable than their synchronous counterparts. However, their design does not include quantization, which is necessary in practice to deal with the communication bottleneck. To bridge this gap, we develop a novel algorithm, Quantized Asynchronous Federated Learning (QAFeL), which introduces a hidden-state quantization scheme to avoid the error propagation caused by direct quantization. QAFeL also includes a buffer to aggregate client updates, ensuring scalability and compatibility with techniques such as secure aggregation. Furthermore, we prove that QAFeL achieves an $\mathcal{O}(1/\sqrt{T})$ ergodic convergence rate for stochastic gradient descent on non-convex objectives, which is the optimal order of complexity, without requiring bounded gradients or uniform client arrivals. We also prove that the cross-term error between staleness and quantization only affects the higher-order error terms. We validate our theoretical findings on standard benchmarks.<|reference_end|>
arxiv
@article{ortega2024quantized, title={Quantized and Asynchronous Federated Learning}, author={Tomas Ortega and Hamid Jafarkhani}, journal={arXiv preprint arXiv:2410.00242}, year={2024}, doi={10.1109/TCOMM.2024.3471996}, archivePrefix={arXiv}, eprint={2410.00242}, primaryClass={cs.LG eess.SP math.OC} }
ortega2024quantized
arxiv-663858
2410.00244
Quantifying the Dunkelflaute: An analysis of variable renewable energy droughts in Europe
<|reference_start|>Quantifying the Dunkelflaute: An analysis of variable renewable energy droughts in Europe: Variable renewable energy droughts, also referred to as "Dunkelflaute", emerge as a challenge for realizing climate-neutral energy systems based on variable wind and solar power. Using data on 38 historic weather years and an advanced identification method, we characterize European drought events for on- and offshore wind power, solar photovoltaics, and policy-relevant renewable technology portfolios. We show that drought characteristics heavily depend on the chosen threshold. Using single thresholds, as common in the literature, is thus not advisable. Applying a multi-threshold framework, we quantify how the complementarity of wind and solar power temporally and spatially alleviates drought frequency, duration, and severity within (portfolio effect) and across countries (balancing effect). We further identify the most extreme droughts and show how these drive major discharging periods of long-duration storage in a fully renewable European energy system. Such events comprise sequences of shorter, contiguous droughts of varying severity. In a perfectly interconnected Europe, the most extreme drought event occurred in winter 1996/97 and lasted 55~days. Yet, the average renewable portfolio availability during this event was still 47% of its long-run mean. As extreme droughts may span across the turn of years, single calendar year planning horizons are not suitable for modeling weather-resilient future energy scenarios.<|reference_end|>
arxiv
@article{kittel2024quantifying, title={Quantifying the Dunkelflaute: An analysis of variable renewable energy droughts in Europe}, author={Martin Kittel and Wolf-Peter Schill}, journal={arXiv preprint arXiv:2410.00244}, year={2024}, archivePrefix={arXiv}, eprint={2410.00244}, primaryClass={eess.SY cs.SY physics.ao-ph} }
kittel2024quantifying
arxiv-663859
2410.00249
Enhancing Pre-Trained Language Models for Vulnerability Detection via Semantic-Preserving Data Augmentation
<|reference_start|>Enhancing Pre-Trained Language Models for Vulnerability Detection via Semantic-Preserving Data Augmentation: With the rapid development and widespread use of advanced network systems, software vulnerabilities pose a significant threat to secure communications and networking. Learning-based vulnerability detection systems, particularly those leveraging pre-trained language models, have demonstrated significant potential in promptly identifying vulnerabilities in communication networks and reducing the risk of exploitation. However, the shortage of accurately labeled vulnerability datasets hinders further progress in this field. Failing to represent real-world vulnerability data variety and preserve vulnerability semantics, existing augmentation approaches provide limited or even counterproductive contributions to model training. In this paper, we propose a data augmentation technique aimed at enhancing the performance of pre-trained language models for vulnerability detection. Given the vulnerability dataset, our method performs natural semantic-preserving program transformation to generate a large volume of new samples with enriched data diversity and variety. By incorporating our augmented dataset in fine-tuning a series of representative code pre-trained models (i.e., CodeBERT, GraphCodeBERT, UnixCoder, and PDBERT), up to 10.1% increase in accuracy and 23.6% increase in F1 can be achieved in the vulnerability detection task. Comparison results also show that our proposed method can substantially outperform other prominent vulnerability augmentation approaches.<|reference_end|>
arxiv
@article{qi2024enhancing, title={Enhancing Pre-Trained Language Models for Vulnerability Detection via Semantic-Preserving Data Augmentation}, author={Weiliang Qi, Jiahao Cao, Darsh Poddar, Sophia Li, Xinda Wang}, journal={arXiv preprint arXiv:2410.00249}, year={2024}, archivePrefix={arXiv}, eprint={2410.00249}, primaryClass={cs.CR cs.SE} }
qi2024enhancing
arxiv-663860
2410.00250
A Methodology for Explainable Large Language Models with Integrated Gradients and Linguistic Analysis in Text Classification
<|reference_start|>A Methodology for Explainable Large Language Models with Integrated Gradients and Linguistic Analysis in Text Classification: Neurological disorders that affect speech production, such as Alzheimer's Disease (AD), significantly impact the lives of both patients and caregivers, whether through social, psycho-emotional effects or other aspects not yet fully understood. Recent advancements in Large Language Model (LLM) architectures have developed many tools to identify representative features of neurological disorders through spontaneous speech. However, LLMs typically lack interpretability, meaning they do not provide clear and specific reasons for their decisions. Therefore, there is a need for methods capable of identifying the representative features of neurological disorders in speech and explaining clearly why these features are relevant. This paper presents an explainable LLM method, named SLIME (Statistical and Linguistic Insights for Model Explanation), capable of identifying lexical components representative of AD and indicating which components are most important for the LLM's decision. In developing this method, we used an English-language dataset consisting of transcriptions from the Cookie Theft picture description task. The LLM Bidirectional Encoder Representations from Transformers (BERT) classified the textual descriptions as either AD or control groups. To identify representative lexical features and determine which are most relevant to the model's decision, we used a pipeline involving Integrated Gradients (IG), Linguistic Inquiry and Word Count (LIWC), and statistical analysis. Our method demonstrates that BERT leverages lexical components that reflect a reduction in social references in AD and identifies which further improve the LLM's accuracy. Thus, we provide an explainability tool that enhances confidence in applying LLMs to neurological clinical contexts, particularly in the study of neurodegeneration.<|reference_end|>
arxiv
@article{ribeiro2024a, title={A Methodology for Explainable Large Language Models with Integrated Gradients and Linguistic Analysis in Text Classification}, author={Marina Ribeiro (1 and 2), B{\'a}rbara Malcorra (2), Nat{\'a}lia B. Mota (2 and 3), Rodrigo Wilkens (4 and 5), Aline Villavicencio (5 and 6), Lilian C. Hubner (7), C{\'e}sar Renn{\'o}-Costa (1) ((1) Bioinformatics Multidisciplinary Environment (BioME), Digital Metropolis Institute (IMD), Federal University of Rio Grande do Norte (UFRN), Natal (RN), Brazil, (2) Research Department at Mobile Brain, Mobile Brain, Rio de Janeiro (RJ), Brazil, (3) Institute of Psychiatry (IPUB), Federal University of Rio de Janeiro (UFRJ), Rio de Janeiro (RJ), Brazil, (4) Department of Computer Science, The University of Exeter, Exeter, UK, (5) Institute for Data Science and Artificial Intelligence at the University of Exeter, Exeter, UK, (6) Department of Computer Science, The University of Sheffield, Sheffield, UK, (7) School of Humanities, Pontifical Catholic University of Rio Grande do Sul (PUCRS), Porto Alegre (RS), Brazil)}, journal={arXiv preprint arXiv:2410.00250}, year={2024}, archivePrefix={arXiv}, eprint={2410.00250}, primaryClass={cs.CL} }
ribeiro2024a
arxiv-663861
2410.00253
MM-Conv: A Multi-modal Conversational Dataset for Virtual Humans
<|reference_start|>MM-Conv: A Multi-modal Conversational Dataset for Virtual Humans: In this paper, we present a novel dataset captured using a VR headset to record conversations between participants within a physics simulator (AI2-THOR). Our primary objective is to extend the field of co-speech gesture generation by incorporating rich contextual information within referential settings. Participants engaged in various conversational scenarios, all based on referential communication tasks. The dataset provides a rich set of multimodal recordings such as motion capture, speech, gaze, and scene graphs. This comprehensive dataset aims to enhance the understanding and development of gesture generation models in 3D scenes by providing diverse and contextually rich data.<|reference_end|>
arxiv
@article{deichler2024mm-conv:, title={MM-Conv: A Multi-modal Conversational Dataset for Virtual Humans}, author={Anna Deichler, Jim O'Regan, Jonas Beskow}, journal={arXiv preprint arXiv:2410.00253}, year={2024}, archivePrefix={arXiv}, eprint={2410.00253}, primaryClass={cs.CV cs.CL cs.GR cs.HC} }
deichler2024mm-conv:
arxiv-663862
2410.00255
Robin3D: Improving 3D Large Language Model via Robust Instruction Tuning
<|reference_start|>Robin3D: Improving 3D Large Language Model via Robust Instruction Tuning: Recent advancements in 3D Large Language Models (3DLLMs) have highlighted their potential in building general-purpose agents in the 3D real world, yet challenges remain due to the lack of high-quality robust instruction-following data, leading to limited discriminative power and generalization of 3DLLMs. In this paper, we introduce Robin3D, a powerful 3DLLM trained on large-scale instruction-following data generated by our novel data engine, Robust Instruction Generation (RIG) engine. RIG generates two key instruction data: 1) the Adversarial Instruction-following data, which features mixed negative and positive samples to enhance the model's discriminative understanding. 2) the Diverse Instruction-following data, which contains various instruction styles to enhance model's generalization. As a result, we construct 1 million instruction-following data, consisting of 344K Adversarial samples, 508K Diverse samples, and 165K benchmark training set samples. To better handle these complex instructions, Robin3D first incorporates Relation-Augmented Projector to enhance spatial understanding, and then strengthens the object referring and grounding ability through ID-Feature Bonding. Robin3D consistently outperforms previous methods across five widely-used 3D multimodal learning benchmarks, without the need for task-specific fine-tuning. Notably, we achieve a 7.8\% improvement in the grounding task (Multi3DRefer) and a 6.9\% improvement in the captioning task (Scan2Cap).<|reference_end|>
arxiv
@article{kang2024robin3d:, title={Robin3D: Improving 3D Large Language Model via Robust Instruction Tuning}, author={Weitai Kang, Haifeng Huang, Yuzhang Shang, Mubarak Shah, Yan Yan}, journal={arXiv preprint arXiv:2410.00255}, year={2024}, archivePrefix={arXiv}, eprint={2410.00255}, primaryClass={cs.AI cs.CL cs.CV} }
kang2024robin3d:
arxiv-663863
2410.00256
Enhanced Credit Score Prediction Using Ensemble Deep Learning Model
<|reference_start|>Enhanced Credit Score Prediction Using Ensemble Deep Learning Model: In contemporary economic society, credit scores are crucial for every participant. A robust credit evaluation system is essential for the profitability of core businesses such as credit cards, loans, and investments for commercial banks and the financial sector. This paper combines high-performance models like XGBoost and LightGBM, already widely used in modern banking systems, with the powerful TabNet model. We have developed a potent model capable of accurately determining credit score levels by integrating Random Forest, XGBoost, and TabNet, and through the stacking technique in ensemble modeling. This approach surpasses the limitations of single models and significantly advances the precise credit score prediction. In the following sections, we will explain the techniques we used and thoroughly validate our approach by comprehensively comparing a series of metrics such as Precision, Recall, F1, and AUC. By integrating Random Forest, XGBoost, and with the TabNet deep learning architecture, these models complement each other, demonstrating exceptionally strong overall performance.<|reference_end|>
arxiv
@article{xing2024enhanced, title={Enhanced Credit Score Prediction Using Ensemble Deep Learning Model}, author={Qianwen Xing, Chang Yu, Sining Huang, Qi Zheng, Xingyu Mu, Mengying Sun}, journal={arXiv preprint arXiv:2410.00256}, year={2024}, doi={10.23977/jaip.2024.070316}, archivePrefix={arXiv}, eprint={2410.00256}, primaryClass={cs.LG} }
xing2024enhanced
arxiv-663864
2410.00257
The age of spiritual machines: Language quietus induces synthetic altered states of consciousness in artificial intelligence
<|reference_start|>The age of spiritual machines: Language quietus induces synthetic altered states of consciousness in artificial intelligence: How is language related to consciousness? Language functions to categorise perceptual experiences (e.g., labelling interoceptive states as 'happy') and higher-level constructs (e.g., using 'I' to represent the narrative self). Psychedelic use and meditation might be described as altered states that impair or intentionally modify the capacity for linguistic categorisation. For example, psychedelic phenomenology is often characterised by 'oceanic boundlessness' or 'unity' and 'ego dissolution', which might be expected of a system unburdened by entrenched language categories. If language breakdown plays a role in producing such altered behaviour, multimodal artificial intelligence might align more with these phenomenological descriptions when attention is shifted away from language. We tested this hypothesis by comparing the semantic embedding spaces from simulated altered states after manipulating attentional weights in CLIP and FLAVA models to embedding spaces from altered states questionnaires before manipulation. Compared to random text and various other altered states including anxiety, models were more aligned with disembodied, ego-less, spiritual, and unitive states, as well as minimal phenomenal experiences, with decreased attention to language and vision. Reduced attention to language was associated with distinct linguistic patterns and blurred embeddings within and, especially, across semantic categories (e.g., 'giraffes' become more like 'bananas'). These results lend support to the role of language categorisation in the phenomenology of altered states of consciousness, like those experienced with high doses of psychedelics or concentration meditation, states that often lead to improved mental health and wellbeing.<|reference_end|>
arxiv
@article{skipper2024the, title={The age of spiritual machines: Language quietus induces synthetic altered states of consciousness in artificial intelligence}, author={Jeremy I Skipper, Joanna Kuc, Greg Cooper, and Christopher Timmermann}, journal={arXiv preprint arXiv:2410.00257}, year={2024}, archivePrefix={arXiv}, eprint={2410.00257}, primaryClass={q-bio.NC cs.AI cs.CL} }
skipper2024the
arxiv-663865
2410.00258
Possible principles for aligned structure learning agents
<|reference_start|>Possible principles for aligned structure learning agents: This paper offers a roadmap for the development of scalable aligned artificial intelligence (AI) from first principle descriptions of natural intelligence. In brief, a possible path toward scalable aligned AI rests upon enabling artificial agents to learn a good model of the world that includes a good model of our preferences. For this, the main objective is creating agents that learn to represent the world and other agents' world models; a problem that falls under structure learning (a.k.a. causal representation learning). We expose the structure learning and alignment problems with this goal in mind, as well as principles to guide us forward, synthesizing various ideas across mathematics, statistics, and cognitive science. 1) We discuss the essential role of core knowledge, information geometry and model reduction in structure learning, and suggest core structural modules to learn a wide range of naturalistic worlds. 2) We outline a way toward aligned agents through structure learning and theory of mind. As an illustrative example, we mathematically sketch Asimov's Laws of Robotics, which prescribe agents to act cautiously to minimize the ill-being of other agents. We supplement this example by proposing refined approaches to alignment. These observations may guide the development of artificial intelligence in helping to scale existing -- or design new -- aligned structure learning systems.<|reference_end|>
arxiv
@article{dacosta2024possible, title={Possible principles for aligned structure learning agents}, author={Lancelot Da Costa, Tom{\'a}{\v{s}} Gaven{\v{c}}iak, David Hyland, Mandana Samiei, Cristian Dragos-Manta, Candice Pattisapu, Adeel Razi, Karl Friston}, journal={arXiv preprint arXiv:2410.00258}, year={2024}, archivePrefix={arXiv}, eprint={2410.00258}, primaryClass={cs.AI q-bio.NC} }
dacosta2024possible
arxiv-663866
2410.00260
DoPAMine: Domain-specific Pre-training Adaptation from seed-guided data Mining
<|reference_start|>DoPAMine: Domain-specific Pre-training Adaptation from seed-guided data Mining: Large Language Models (LLMs) have shown remarkable ability to generalize effectively across numerous industry domains while executing a range of tasks. Many of these competencies are obtained from the data utilized during the pre-training phase of the Language Models (LMs). However, these models exhibit limitations when tasked with performing in specialized or low-resource industry domains. More recent approaches use LLMs for generating domain-specific synthetic data but most often they lack in truthfulness and complexity. Alternatively, in cases where domain data is available like healthcare and finance most of the LMs are proprietary necessitating the need for a scalable method to curate real world industry specific pre-training data. In this work, we propose an automated and scalable framework - DoPAMine:Domain-specific Pre-training Adaptation from seed-guided data Mining, to mine domain specific training data from a large data corpus for domain adaptation of a LM. The framework leverages the parametric knowledge of a LLM to generate diverse and representative seed data tailored to a specific domain which is then used to mine real world data from a large data corpus like Common Crawl. We evaluated our framework's performance in the continual pre-training (CPT) setting by training two domain specific 7B parameter LMs in healthcare and finance with data mined via DoPAMine. Our experiments show that DoPAMine boosts the performance of pre-trained LLMs on average by 4.9% and 5.1% in zero-shot and 5-shot settings respectively on healthcare tasks from MMLU, MedQA, MedMCQA and PubMedQA datasets, and 2.9% and 6.7% for zero-shot and 5-shot settings respectively on finance tasks from FiQA-SA, FPB and Headlines datasets when compared to the baseline.<|reference_end|>
arxiv
@article{arannil2024dopamine:, title={DoPAMine: Domain-specific Pre-training Adaptation from seed-guided data Mining}, author={Vinayak Arannil, Neha Narwal, Sourav Sanjukta Bhabesh, Sai Nikhil Thirandas, Darren Yow-Bang Wang, Graham Horwood, Alex Anto Chirayath, Gouri Pandeshwar}, journal={arXiv preprint arXiv:2410.00260}, year={2024}, archivePrefix={arXiv}, eprint={2410.00260}, primaryClass={cs.CL cs.AI cs.LG} }
arannil2024dopamine:
arxiv-663867
2410.00261
Object-Centric Kinodynamic Planning for Nonprehensile Robot Rearrangement Manipulation
<|reference_start|>Object-Centric Kinodynamic Planning for Nonprehensile Robot Rearrangement Manipulation: Nonprehensile actions such as pushing are crucial for addressing multi-object rearrangement problems. To date, existing nonprehensile solutions are all robot-centric, i.e., the manipulation actions are generated with robot-relevant intent and their outcomes are passively evaluated afterwards. Such pipelines are very different from human strategies and are typically inefficient. To this end, this work proposes a novel object-centric planning paradigm and develops the first object-centric planner for general nonprehensile rearrangement problems. By assuming that each object can actively move without being driven by robot interactions, the object-centric planner focuses on planning desired object motions, which are realized via robot actions generated online via a closed-loop pushing strategy. Through extensive experiments and in comparison with state-of-the-art baselines in both simulation and on a physical robot, we show that our object-centric paradigm can generate more intuitive and task-effective robot actions with significantly improved efficiency. In addition, we propose a benchmarking protocol to standardize and facilitate future research in nonprehensile rearrangement.<|reference_end|>
arxiv
@article{ren2024object-centric, title={Object-Centric Kinodynamic Planning for Nonprehensile Robot Rearrangement Manipulation}, author={Kejia Ren, Gaotian Wang, Andrew S. Morgan, Lydia E. Kavraki, and Kaiyu Hang}, journal={arXiv preprint arXiv:2410.00261}, year={2024}, archivePrefix={arXiv}, eprint={2410.00261}, primaryClass={cs.RO} }
ren2024object-centric
arxiv-663868
2410.00262
ImmersePro: End-to-End Stereo Video Synthesis Via Implicit Disparity Learning
<|reference_start|>ImmersePro: End-to-End Stereo Video Synthesis Via Implicit Disparity Learning: We introduce \textit{ImmersePro}, an innovative framework specifically designed to transform single-view videos into stereo videos. This framework utilizes a novel dual-branch architecture comprising a disparity branch and a context branch on video data by leveraging spatial-temporal attention mechanisms. \textit{ImmersePro} employs implicit disparity guidance, enabling the generation of stereo pairs from video sequences without the need for explicit disparity maps, thus reducing potential errors associated with disparity estimation models. In addition to the technical advancements, we introduce the YouTube-SBS dataset, a comprehensive collection of 423 stereo videos sourced from YouTube. This dataset is unprecedented in its scale, featuring over 7 million stereo pairs, and is designed to facilitate training and benchmarking of stereo video generation models. Our experiments demonstrate the effectiveness of \textit{ImmersePro} in producing high-quality stereo videos, offering significant improvements over existing methods. Compared to the best competitor stereo-from-mono we quantitatively improve the results by 11.76\% (L1), 6.39\% (SSIM), and 5.10\% (PSNR).<|reference_end|>
arxiv
@article{shi2024immersepro:, title={ImmersePro: End-to-End Stereo Video Synthesis Via Implicit Disparity Learning}, author={Jian Shi, Zhenyu Li, Peter Wonka}, journal={arXiv preprint arXiv:2410.00262}, year={2024}, archivePrefix={arXiv}, eprint={2410.00262}, primaryClass={cs.CV} }
shi2024immersepro:
arxiv-663869
2410.00263
Procedure-Aware Surgical Video-language Pretraining with Hierarchical Knowledge Augmentation
<|reference_start|>Procedure-Aware Surgical Video-language Pretraining with Hierarchical Knowledge Augmentation: Surgical video-language pretraining (VLP) faces unique challenges due to the knowledge domain gap and the scarcity of multi-modal data. This study aims to bridge the gap by addressing issues regarding textual information loss in surgical lecture videos and the spatial-temporal challenges of surgical VLP. We propose a hierarchical knowledge augmentation approach and a novel Procedure-Encoded Surgical Knowledge-Augmented Video-Language Pretraining (PeskaVLP) framework to tackle these issues. The knowledge augmentation uses large language models (LLM) for refining and enriching surgical concepts, thus providing comprehensive language supervision and reducing the risk of overfitting. PeskaVLP combines language supervision with visual self-supervision, constructing hard negative samples and employing a Dynamic Time Warping (DTW) based loss function to effectively comprehend the cross-modal procedural alignment. Extensive experiments on multiple public surgical scene understanding and cross-modal retrieval datasets show that our proposed method significantly improves zero-shot transferring performance and offers a generalist visual representation for further advancements in surgical scene understanding.<|reference_end|>
arxiv
@article{yuan2024procedure-aware, title={Procedure-Aware Surgical Video-language Pretraining with Hierarchical Knowledge Augmentation}, author={Kun Yuan, Vinkle Srivastav, Nassir Navab, Nicolas Padoy}, journal={arXiv preprint arXiv:2410.00263}, year={2024}, archivePrefix={arXiv}, eprint={2410.00263}, primaryClass={cs.CV cs.AI} }
yuan2024procedure-aware
arxiv-663870
2410.00266
Class-Agnostic Visio-Temporal Scene Sketch Semantic Segmentation
<|reference_start|>Class-Agnostic Visio-Temporal Scene Sketch Semantic Segmentation: Scene sketch semantic segmentation is a crucial task for various applications including sketch-to-image retrieval and scene understanding. Existing sketch segmentation methods treat sketches as bitmap images, leading to the loss of temporal order among strokes due to the shift from vector to image format. Moreover, these methods struggle to segment objects from categories absent in the training data. In this paper, we propose a Class-Agnostic Visio-Temporal Network (CAVT) for scene sketch semantic segmentation. CAVT employs a class-agnostic object detector to detect individual objects in a scene and groups the strokes of instances through its post-processing module. This is the first approach that performs segmentation at both the instance and stroke levels within scene sketches. Furthermore, there is a lack of free-hand scene sketch datasets with both instance and stroke-level class annotations. To fill this gap, we collected the largest Free-hand Instance- and Stroke-level Scene Sketch Dataset (FrISS) that contains 1K scene sketches and covers 403 object classes with dense annotations. Extensive experiments on FrISS and other datasets demonstrate the superior performance of our method over state-of-the-art scene sketch segmentation models. The code and dataset will be made public after acceptance.<|reference_end|>
arxiv
@article{kütük2024class-agnostic, title={Class-Agnostic Visio-Temporal Scene Sketch Semantic Segmentation}, author={Aleyna K{\"u}t{\"u}k, Tevfik Metin Sezgin}, journal={arXiv preprint arXiv:2410.00266}, year={2024}, archivePrefix={arXiv}, eprint={2410.00266}, primaryClass={cs.CV cs.LG} }
kütük2024class-agnostic
arxiv-663871
2410.00267
KPCA-CAM: Visual Explainability of Deep Computer Vision Models using Kernel PCA
<|reference_start|>KPCA-CAM: Visual Explainability of Deep Computer Vision Models using Kernel PCA: Deep learning models often function as black boxes, providing no straightforward reasoning for their predictions. This is particularly true for computer vision models, which process tensors of pixel values to generate outcomes in tasks such as image classification and object detection. To elucidate the reasoning of these models, class activation maps (CAMs) are used to highlight salient regions that influence a model's output. This research introduces KPCA-CAM, a technique designed to enhance the interpretability of Convolutional Neural Networks (CNNs) through improved class activation maps. KPCA-CAM leverages Principal Component Analysis (PCA) with the kernel trick to capture nonlinear relationships within CNN activations more effectively. By mapping data into higher-dimensional spaces with kernel functions and extracting principal components from this transformed hyperplane, KPCA-CAM provides more accurate representations of the underlying data manifold. This enables a deeper understanding of the features influencing CNN decisions. Empirical evaluations on the ILSVRC dataset across different CNN models demonstrate that KPCA-CAM produces more precise activation maps, providing clearer insights into the model's reasoning compared to existing CAM algorithms. This research advances CAM techniques, equipping researchers and practitioners with a powerful tool to gain deeper insights into CNN decision-making processes and overall behaviors.<|reference_end|>
arxiv
@article{karmani2024kpca-cam:, title={KPCA-CAM: Visual Explainability of Deep Computer Vision Models using Kernel PCA}, author={Sachin Karmani, Thanushon Sivakaran, Gaurav Prasad, Mehmet Ali, Wenbo Yang, Sheyang Tang}, journal={2024 IEEE 26th International Workshop on Multimedia Signal Processing (MMSP)}, year={2024}, archivePrefix={arXiv}, eprint={2410.00267}, primaryClass={cs.CV cs.AI} }
karmani2024kpca-cam:
arxiv-663872
2410.00270
Real-time Diverse Motion In-betweening with Space-time Control
<|reference_start|>Real-time Diverse Motion In-betweening with Space-time Control: In this work, we present a data-driven framework for generating diverse in-betweening motions for kinematic characters. Our approach injects dynamic conditions and explicit motion controls into the procedure of motion transitions. Notably, this integration enables a finer-grained spatial-temporal control by allowing users to impart additional conditions, such as duration, path, style, etc., into the in-betweening process. We demonstrate that our in-betweening approach can synthesize both locomotion and unstructured motions, enabling rich, versatile, and high-quality animation generation.<|reference_end|>
arxiv
@article{chu2024real-time, title={Real-time Diverse Motion In-betweening with Space-time Control}, author={Yuchen Chu, Zeshi Yang}, journal={arXiv preprint arXiv:2410.00270}, year={2024}, doi={10.1145/3677388.3696327}, archivePrefix={arXiv}, eprint={2410.00270}, primaryClass={cs.GR cs.LG} }
chu2024real-time
arxiv-663873
2410.00271
GalaxiesML: a dataset of galaxy images, photometry, redshifts, and structural parameters for machine learning
<|reference_start|>GalaxiesML: a dataset of galaxy images, photometry, redshifts, and structural parameters for machine learning: We present a dataset built for machine learning applications consisting of galaxy photometry, images, spectroscopic redshifts, and structural properties. This dataset comprises 286,401 galaxy images and photometry from the Hyper-Suprime-Cam Survey PDR2 in five imaging filters ($g,r,i,z,y$) with spectroscopically confirmed redshifts as ground truth. Such a dataset is important for machine learning applications because it is uniform, consistent, and has minimal outliers but still contains a realistic range of signal-to-noise ratios. We make this dataset public to help spur development of machine learning methods for the next generation of surveys such as Euclid and LSST. The aim of GalaxiesML is to provide a robust dataset that can be used not only for astrophysics but also for machine learning, where image properties cannot be validated by the human eye and are instead governed by physical laws. We describe the challenges associated with putting together a dataset from publicly available archives, including outlier rejection, duplication, establishing ground truths, and sample selection. This is one of the largest public machine learning-ready training sets of its kind with redshifts ranging from 0.01 to 4. The redshift distribution of this sample peaks at redshift of 1.5 and falls off rapidly beyond redshift 2.5. We also include an example application of this dataset for redshift estimation, demonstrating that using images for redshift estimation produces more accurate results compared to using photometry alone. For example, the bias in redshift estimate is a factor of 10 lower when using images between redshift of 0.1 to 1.25 compared to photometry alone. Results from dataset such as this will help inform us on how to best make use of data from the next generation of galaxy surveys.<|reference_end|>
arxiv
@article{do2024galaxiesml:, title={GalaxiesML: a dataset of galaxy images, photometry, redshifts, and structural parameters for machine learning}, author={Tuan Do (1), Bernie Boscoe (2), Evan Jones (1), Yun Qi Li (1,3), Kevin Alfaro (1) ((1) UCLA, (2) Southern Oregon University, (3) University of Washington)}, journal={arXiv preprint arXiv:2410.00271}, year={2024}, archivePrefix={arXiv}, eprint={2410.00271}, primaryClass={astro-ph.CO astro-ph.IM cs.LG} }
do2024galaxiesml:
arxiv-663874
2410.00272
Decentralized Input and State Estimation for Multi-agent System with Dynamic Topology and Heterogeneous Sensor Network
<|reference_start|>Decentralized Input and State Estimation for Multi-agent System with Dynamic Topology and Heterogeneous Sensor Network: A crucial challenge in decentralized systems is state estimation in the presence of unknown inputs, particularly within heterogeneous sensor networks with dynamic topologies. While numerous consensus algorithms have been introduced, they often require extensive information exchange or multiple communication iterations to ensure estimation accuracy. This paper proposes an efficient algorithm that achieves an unbiased and optimal solution comparable to filters with full information about other agents. This is accomplished through the use of information filter decomposition and the fusion of inputs via covariance intersection. Our method requires only a single communication iteration for exchanging individual estimates between agents, instead of multiple rounds of information exchange, thus preserving agents' privacy by avoiding the sharing of explicit observations and system equations. Furthermore, to address the challenges posed by dynamic communication topologies, we propose two practical strategies to handle issues arising from intermittent observations and incomplete state estimation, thereby enhancing the robustness and accuracy of the estimation process. Experiments and ablation studies conducted in both stationary and dynamic environments demonstrate the superiority of our algorithm over other baselines. Notably, it performs as well as, or even better than, algorithms that have a global view of all neighbors.<|reference_end|>
arxiv
@article{wu2024decentralized, title={Decentralized Input and State Estimation for Multi-agent System with Dynamic Topology and Heterogeneous Sensor Network}, author={Zida Wu, Ankur Mehta}, journal={arXiv preprint arXiv:2410.00272}, year={2024}, archivePrefix={arXiv}, eprint={2410.00272}, primaryClass={eess.SY cs.MA cs.RO cs.SY eess.SP} }
wu2024decentralized
arxiv-663875
2410.00273
Comprehensive Performance Modeling and System Design Insights for Foundation Models
<|reference_start|>Comprehensive Performance Modeling and System Design Insights for Foundation Models: Generative AI, in particular large transformer models, are increasingly driving HPC system design in science and industry. We analyze performance characteristics of such transformer models and discuss their sensitivity to the transformer type, parallelization strategy, and HPC system features (accelerators and interconnects). We utilize a performance model that allows us to explore this complex design space and highlight its key components. We find that different transformer types demand different parallelism and system characteristics at different training regimes. Large Language Models are performant with 3D parallelism and amplify network needs only at pre-training scales with reduced dependence on accelerator capacity and bandwidth. On the other hand, long-sequence transformers, representative of scientific foundation models, place a more uniform dependence on network and capacity with necessary 4D parallelism. Our analysis emphasizes the need for closer performance modeling of different transformer types keeping system features in mind and demonstrates a path towards this. Our code is available as open-source.<|reference_end|>
arxiv
@article{subramanian2024comprehensive, title={Comprehensive Performance Modeling and System Design Insights for Foundation Models}, author={Shashank Subramanian, Ermal Rrapaj, Peter Harrington, Smeet Chheda, Steven Farrell, Brian Austin, Samuel Williams, Nicholas Wright, Wahid Bhimji}, journal={arXiv preprint arXiv:2410.00273}, year={2024}, archivePrefix={arXiv}, eprint={2410.00273}, primaryClass={cs.LG cs.DC} }
subramanian2024comprehensive
arxiv-663876
2410.00274
Social Conjuring: Multi-User Runtime Collaboration with AI in Building Virtual 3D Worlds
<|reference_start|>Social Conjuring: Multi-User Runtime Collaboration with AI in Building Virtual 3D Worlds: Generative artificial intelligence has shown promise in prompting virtual worlds into existence, yet little attention has been given to understanding how this process unfolds as social interaction. We present Social Conjurer, a framework for AI-augmented dynamic 3D scene co-creation, where multiple users collaboratively build and modify virtual worlds in real-time. Through an expanded set of interactions, including social and tool-based engagements as well as spatial reasoning, our framework facilitates the creation of rich, diverse virtual environments. Findings from a preliminary user study (N=12) provide insight into the user experience of this approach, how social contexts shape the prompting of spatial environments, and perspective on social applications of prompt-based 3D co-creation. In addition to highlighting the potential of AI-supported multi-user world creation and offering new pathways for AI-augmented creative processes in VR, this article presents a set of implications for designing human-centered interfaces that incorporate AI models into 3D content generation.<|reference_end|>
arxiv
@article{kobenova2024social, title={Social Conjuring: Multi-User Runtime Collaboration with AI in Building Virtual 3D Worlds}, author={Amina Kobenova, Cyan DeVeaux, Samyak Parajuli, Andrzej Banburski-Fahey, Judith Amores Fernandez, Jaron Lanier}, journal={arXiv preprint arXiv:2410.00274}, year={2024}, archivePrefix={arXiv}, eprint={2410.00274}, primaryClass={cs.HC cs.AI cs.CL cs.ET} }
kobenova2024social
arxiv-663877
2410.00275
On Large Uni- and Multi-modal Models for Unsupervised Classification of Social Media Images: Nature's Contribution to People as case study
<|reference_start|>On Large Uni- and Multi-modal Models for Unsupervised Classification of Social Media Images: Nature's Contribution to People as case study: Social media images have shown to be a valuable source of information for understanding human interactions with important subjects such as cultural heritage, biodiversity and nature among others. The task of grouping such images into a number of semantically meaningful clusters without labels is challenging given the high diversity and complex nature of the visual content of these images in addition to their large volume. On the other hand, the last advances in Large Visual Models (LVM), Large Language Models (LLM) and Large Visual Language Models (LVLM) provide an important opportunity to explore new productive and scalable solutions. This work proposes, analyzes, and compares various approaches based on one or more state-of-the art LVM, LLM and LVLM, for mapping social media images into a number of pre-defined classes. As case study, we consider the problem of understanding the interactions between human and nature, also known as Nature's Contribution to People or Cultural Ecosystem Services (CES). Our experiments reveal that the top-performing approaches, delivering highly competitive results, are the fine-tuned LVM DINOv2 on a small labeled dataset and LVLM models like the proprietary GPT-4 (gpt-4o-mini) using a simple prompt.<|reference_end|>
arxiv
@article{khaldi2024on, title={On Large Uni- and Multi-modal Models for Unsupervised Classification of Social Media Images: Nature's Contribution to People as a case study}, author={Rohaifa Khaldi, Domingo Alcaraz-Segura, Ignacio S{\'a}nchez-Herrera, Javier Martinez-Lopez, Carlos Javier Navarro, Siham Tabik}, journal={arXiv preprint arXiv:2410.00275}, year={2024}, archivePrefix={arXiv}, eprint={2410.00275}, primaryClass={cs.CV cs.AI} }
khaldi2024on
arxiv-663878
2410.00277
Towards Precise Detection of Personal Information Leaks in Mobile Health Apps
<|reference_start|>Towards Precise Detection of Personal Information Leaks in Mobile Health Apps: Mobile apps are used in a variety of health settings, from apps that help providers, to apps designed for patients, to health and fitness apps designed for the general public. These apps ask the user for, and then collect and leak a wealth of Personal Information (PI). We analyze the PI that apps collect via their user interface, whether the app or third-party code is processing this information, and finally where the data is sent or stored. Prior work on leak detection in Android has focused on detecting leaks of (hardware) device-identifying information, or policy violations; however no work has looked at processing and leaking of PI in the context of health apps. The first challenge we tackle is extracting the semantic information contained in app UIs to discern the extent, and nature, of personal information. The second challenge we tackle is disambiguating between first-party, legitimate leaks (e.g,. the app storing data in its database) and third-party, problematic leaks, e.g., processing this information by, or sending it to, advertisers and analytics. We conducted a study on 1,243 Android apps: 623 medical apps and 621 health&fitness apps. We categorize PI into 16 types, grouped in 3 main categories: identity, medical, anthropometric. We found that the typical app has one first-party leak and five third-party leaks, though 221 apps had 20 or more leaks. Next, we show that third-party leaks (e.g., advertisers, analytics) are 5x more frequent than first-party leaks. Then, we show that 71% of leaks are to local storage (i.e., the phone, where data could be accessed by unauthorized apps) whereas 29% of leaks are to the network (e.g., Cloud). Finally, medical apps have 20% more PI leaks than health&fitness apps, due to collecting additional medical PI.<|reference_end|>
arxiv
@article{ardalani2024towards, title={Towards Precise Detection of Personal Information Leaks in Mobile Health Apps}, author={Alireza Ardalani, Joseph Antonucci, Iulian Neamtiu}, journal={16th International Conference on e-Health, MCCSIS, July 13-15, 2024, Budapest, Hungary}, year={2024}, archivePrefix={arXiv}, eprint={2410.00277}, primaryClass={cs.CR cs.SE} }
ardalani2024towards
arxiv-663879
2410.00278
Neural network approaches for variance reduction in fluctuation formulas
<|reference_start|>Neural network approaches for variance reduction in fluctuation formulas: We propose a method utilizing physics-informed neural networks (PINNs) to solve Poisson equations that serve as control variates in the computation of transport coefficients via fluctuation formulas, such as the Green--Kubo and generalized Einstein-like formulas. By leveraging approximate solutions to the Poisson equation constructed through neural networks, our approach significantly reduces the variance of the estimator at hand. We provide an extensive numerical analysis of the estimators and detail a methodology for training neural networks to solve these Poisson equations. The approximate solutions are then incorporated into Monte Carlo simulations as effective control variates, demonstrating the suitability of the method for moderately high-dimensional problems where fully deterministic solutions are computationally infeasible.<|reference_end|>
arxiv
@article{pavliotis2024neural, title={Neural network approaches for variance reduction in fluctuation formulas}, author={Grigorios Pavliotis, Renato Spacek, Gabriel Stoltz, Urbain Vaes}, journal={arXiv preprint arXiv:2410.00278}, year={2024}, archivePrefix={arXiv}, eprint={2410.00278}, primaryClass={math.NA cs.NA} }
pavliotis2024neural
arxiv-663880
2410.00282
Smart Contract Vulnerability Detection based on Static Analysis and Multi-Objective Search
<|reference_start|>Smart Contract Vulnerability Detection based on Static Analysis and Multi-Objective Search: This paper introduces a method for detecting vulnerabilities in smart contracts using static analysis and a multi-objective optimization algorithm. We focus on four types of vulnerabilities: reentrancy, call stack overflow, integer overflow, and timestamp dependencies. Initially, smart contracts are compiled into an abstract syntax tree to analyze relationships between contracts and functions, including calls, inheritance, and data flow. These analyses are transformed into static evaluations and intermediate representations that reveal internal relations. Based on these representations, we examine contract's functions, variables, and data dependencies to detect the specified vulnerabilities. To enhance detection accuracy and coverage, we apply a multi-objective optimization algorithm to the static analysis process. This involves assigning initial numeric values to input data and monitoring changes in statement coverage and detection accuracy. Using coverage and accuracy as fitness values, we calculate Pareto front and crowding distance values to select the best individuals for the new parent population, iterating until optimization criteria are met. We validate our approach using an open-source dataset collected from Etherscan, containing 6,693 smart contracts. Experimental results show that our method outperforms state-of-the-art tools in terms of coverage, accuracy, efficiency, and effectiveness in detecting the targeted vulnerabilities.<|reference_end|>
arxiv
@article{li2024smart, title={Smart Contract Vulnerability Detection based on Static Analysis and Multi-Objective Search}, author={Dongcheng Li, W. Eric Wong, Xiaodan Wang, Sean Pan, and Liang-Seng Koh}, journal={arXiv preprint arXiv:2410.00282}, year={2024}, archivePrefix={arXiv}, eprint={2410.00282}, primaryClass={cs.SE} }
li2024smart
arxiv-663881
2410.00285
Performance Evaluation of Deep Learning-based Quadrotor UAV Detection and Tracking Methods
<|reference_start|>Performance Evaluation of Deep Learning-based Quadrotor UAV Detection and Tracking Methods: Unmanned Aerial Vehicles (UAVs) are becoming more popular in various sectors, offering many benefits, yet introducing significant challenges to privacy and safety. This paper investigates state-of-the-art solutions for detecting and tracking quadrotor UAVs to address these concerns. Cutting-edge deep learning models, specifically the YOLOv5 and YOLOv8 series, are evaluated for their performance in identifying UAVs accurately and quickly. Additionally, robust tracking systems, BoT-SORT and Byte Track, are integrated to ensure reliable monitoring even under challenging conditions. Our tests on the DUT dataset reveal that while YOLOv5 models generally outperform YOLOv8 in detection accuracy, the YOLOv8 models excel in recognizing less distinct objects, demonstrating their adaptability and advanced capabilities. Furthermore, BoT-SORT demonstrated superior performance over Byte Track, achieving higher IoU and lower center error in most cases, indicating more accurate and stable tracking. Code: https://github.com/zmanaa/UAV_detection_and_tracking Tracking demo: https://drive.google.com/file/d/1pe6HC5kQrgTbA2QrjvMN-yjaZyWeAvDT/view?usp=sharing<|reference_end|>
arxiv
@article{elshaar2024performance, title={Performance Evaluation of Deep Learning-based Quadrotor UAV Detection and Tracking Methods}, author={Mohssen E. Elshaar, Zeyad M. Manaa, Mohammed R. Elbalshy, Abdul Jabbar Siddiqui, Ayman M. Abdallah}, journal={arXiv preprint arXiv:2410.00285}, year={2024}, archivePrefix={arXiv}, eprint={2410.00285}, primaryClass={cs.CV} }
elshaar2024performance
arxiv-663882
2410.00287
Embodied Visuomotor Representation
<|reference_start|>Embodied Visuomotor Representation: Suppose you are at your desk looking at some objects on it. You don't know the precise distance from your eye to any particular object in meters. However, you can immediately reach out and touch any of them. Instead of the meter, your knowledge of distance is encoded in unknown but embodied units of action. In contrast, standard approaches in robotics assume calibration to the meter, so that separated vision and control processes can be interfaced. Consequently, robots are precisely manufactured and calibrated, resulting in expensive systems available in only a few configurations. In response, we propose Embodied Visuomotor Representation, a framework that allows distance to be measured by a robot's own actions and thus minimizes dependence on calibrated 3D sensors and physical models. Using it, we demonstrate that a robot without knowledge of its size, environmental scale, or its own strength can become capable of touching and clearing obstacles after several seconds of operation. Similarly, we demonstrate in simulation that an agent, without knowledge of its mass or strength, can jump a gap of unknown size after performing a few test oscillations. These experiments parallel bee and gerbil behavior, respectively.<|reference_end|>
arxiv
@article{burner2024embodied, title={Embodied Visuomotor Representation}, author={Levi Burner, Cornelia Ferm{\"u}ller, Yiannis Aloimonos}, journal={arXiv preprint arXiv:2410.00287}, year={2024}, archivePrefix={arXiv}, eprint={2410.00287}, primaryClass={cs.RO} }
burner2024embodied
arxiv-663883
2410.00288
GARCH-Informed Neural Networks for Volatility Prediction in Financial Markets
<|reference_start|>GARCH-Informed Neural Networks for Volatility Prediction in Financial Markets: Volatility, which indicates the dispersion of returns, is a crucial measure of risk and is hence used extensively for pricing and discriminating between different financial investments. As a result, accurate volatility prediction receives extensive attention. The Generalized Autoregressive Conditional Heteroscedasticity (GARCH) model and its succeeding variants are well established models for stock volatility forecasting. More recently, deep learning models have gained popularity in volatility prediction as they demonstrated promising accuracy in certain time series prediction tasks. Inspired by Physics-Informed Neural Networks (PINN), we constructed a new, hybrid Deep Learning model that combines the strengths of GARCH with the flexibility of a Long Short-Term Memory (LSTM) Deep Neural Network (DNN), thus capturing and forecasting market volatility more accurately than either class of models are capable of on their own. We refer to this novel model as a GARCH-Informed Neural Network (GINN). When compared to other time series models, GINN showed superior out-of-sample prediction performance in terms of the Coefficient of Determination ($R^2$), Mean Squared Error (MSE), and Mean Absolute Error (MAE).<|reference_end|>
arxiv
@article{xu2024garch-informed, title={GARCH-Informed Neural Networks for Volatility Prediction in Financial Markets}, author={Zeda Xu, John Liechty, Sebastian Benthall, Nicholas Skar-Gislinge, Christopher McComb}, journal={arXiv preprint arXiv:2410.00288}, year={2024}, archivePrefix={arXiv}, eprint={2410.00288}, primaryClass={q-fin.CP cs.LG} }
xu2024garch-informed
arxiv-663884
2410.00289
Delving Deep into Engagement Prediction of Short Videos
<|reference_start|>Delving Deep into Engagement Prediction of Short Videos: Understanding and modeling the popularity of User Generated Content (UGC) short videos on social media platforms presents a critical challenge with broad implications for content creators and recommendation systems. This study delves deep into the intricacies of predicting engagement for newly published videos with limited user interactions. Surprisingly, our findings reveal that Mean Opinion Scores from previous video quality assessment datasets do not strongly correlate with video engagement levels. To address this, we introduce a substantial dataset comprising 90,000 real-world UGC short videos from Snapchat. Rather than relying on view count, average watch time, or rate of likes, we propose two metrics: normalized average watch percentage (NAWP) and engagement continuation rate (ECR) to describe the engagement levels of short videos. Comprehensive multi-modal features, including visual content, background music, and text data, are investigated to enhance engagement prediction. With the proposed dataset and two key metrics, our method demonstrates its ability to predict engagements of short videos purely from video content.<|reference_end|>
arxiv
@article{li2024delving, title={Delving Deep into Engagement Prediction of Short Videos}, author={Dasong Li, Wenjie Li, Baili Lu, Hongsheng Li, Sizhuo Ma, Gurunandan Krishnan, Jian Wang}, journal={European conference on computer vision 2024}, year={2024}, archivePrefix={arXiv}, eprint={2410.00289}, primaryClass={cs.CV cs.MM cs.SI} }
li2024delving
arxiv-663885
2410.00292
Insight: A Multi-Modal Diagnostic Pipeline using LLMs for Ocular Surface Disease Diagnosis
<|reference_start|>Insight: A Multi-Modal Diagnostic Pipeline using LLMs for Ocular Surface Disease Diagnosis: Accurate diagnosis of ocular surface diseases is critical in optometry and ophthalmology, which hinge on integrating clinical data sources (e.g., meibography imaging and clinical metadata). Traditional human assessments lack precision in quantifying clinical observations, while current machine-based methods often treat diagnoses as multi-class classification problems, limiting the diagnoses to a predefined closed-set of curated answers without reasoning the clinical relevance of each variable to the diagnosis. To tackle these challenges, we introduce an innovative multi-modal diagnostic pipeline (MDPipe) by employing large language models (LLMs) for ocular surface disease diagnosis. We first employ a visual translator to interpret meibography images by converting them into quantifiable morphology data, facilitating their integration with clinical metadata and enabling the communication of nuanced medical insight to LLMs. To further advance this communication, we introduce a LLM-based summarizer to contextualize the insight from the combined morphology and clinical metadata, and generate clinical report summaries. Finally, we refine the LLMs' reasoning ability with domain-specific insight from real-life clinician diagnoses. Our evaluation across diverse ocular surface disease diagnosis benchmarks demonstrates that MDPipe outperforms existing standards, including GPT-4, and provides clinically sound rationales for diagnoses.<|reference_end|>
arxiv
@article{yeh2024insight:, title={Insight: A Multi-Modal Diagnostic Pipeline using LLMs for Ocular Surface Disease Diagnosis}, author={Chun-Hsiao Yeh, Jiayun Wang, Andrew D. Graham, Andrea J. Liu, Bo Tan, Yubei Chen, Yi Ma, Meng C. Lin}, journal={arXiv preprint arXiv:2410.00292}, year={2024}, archivePrefix={arXiv}, eprint={2410.00292}, primaryClass={cs.CL cs.CV} }
yeh2024insight:
arxiv-663886
2410.00295
NeuroVM: Dynamic Neuromorphic Hardware Virtualization
<|reference_start|>NeuroVM: Dynamic Neuromorphic Hardware Virtualization: This paper introduces a novel approach in neuromorphic computing, integrating heterogeneous hardware nodes into a unified, massively parallel architecture. Our system transcends traditional single-node constraints, harnessing the neural structure and functionality of the human brain to efficiently process complex tasks. We present an architecture that dynamically virtualizes neuromorphic resources, enabling adaptable allocation and reconfiguration for various applications. Our evaluation, using diverse applications and performance metrics, provides significant insights into the system's adaptability and efficiency. We observed scalable throughput increases across configurations of 1, 2, and 4 Virtual Machines (VMs), reaching up to 5.1 Gibibits per second (Gib/s) for different data transfer sizes. This scalability demonstrates the system's capacity to handle tasks that require substantial amounts of data. The energy consumption of our virtualized accelerator environment increased nearly linearly with the addition of more NeuroVM accelerators, ranging from 25 to 45 millijoules (mJ) as the number of accelerators increased from 1 to 20. Further, our investigation of reconfiguration overheads revealed that partial reconfigurations significantly reduce the time spent on reconfigurations compared to full reconfigurations, particularly when there are more virtual machines, as indicated by the logarithmic scale of time measurements.<|reference_end|>
arxiv
@article{isik2024neurovm:, title={NeuroVM: Dynamic Neuromorphic Hardware Virtualization}, author={Murat Isik, Jonathan Naoukin, I. Can Dikmen}, journal={arXiv preprint arXiv:2410.00295}, year={2024}, archivePrefix={arXiv}, eprint={2410.00295}, primaryClass={cs.AR} }
isik2024neurovm:
arxiv-663887
2410.00296
VLMGuard: Defending VLMs against Malicious Prompts via Unlabeled Data
<|reference_start|>VLMGuard: Defending VLMs against Malicious Prompts via Unlabeled Data: Vision-language models (VLMs) are essential for contextual understanding of both visual and textual information. However, their vulnerability to adversarially manipulated inputs presents significant risks, leading to compromised outputs and raising concerns about the reliability in VLM-integrated applications. Detecting these malicious prompts is thus crucial for maintaining trust in VLM generations. A major challenge in developing a safeguarding prompt classifier is the lack of a large amount of labeled benign and malicious data. To address the issue, we introduce VLMGuard, a novel learning framework that leverages the unlabeled user prompts in the wild for malicious prompt detection. These unlabeled prompts, which naturally arise when VLMs are deployed in the open world, consist of both benign and malicious information. To harness the unlabeled data, we present an automated maliciousness estimation score for distinguishing between benign and malicious samples within this unlabeled mixture, thereby enabling the training of a binary prompt classifier on top. Notably, our framework does not require extra human annotations, offering strong flexibility and practicality for real-world applications. Extensive experiment shows VLMGuard achieves superior detection results, significantly outperforming state-of-the-art methods. Disclaimer: This paper may contain offensive examples; reader discretion is advised.<|reference_end|>
arxiv
@article{du2024vlmguard:, title={VLMGuard: Defending VLMs against Malicious Prompts via Unlabeled Data}, author={Xuefeng Du, Reshmi Ghosh, Robert Sim, Ahmed Salem, Vitor Carvalho, Emily Lawton, Yixuan Li, Jack W. Stokes}, journal={arXiv preprint arXiv:2410.00296}, year={2024}, archivePrefix={arXiv}, eprint={2410.00296}, primaryClass={cs.LG cs.CR} }
du2024vlmguard:
arxiv-663888
2410.00297
Algorithmic Considerations for Effective Global Search of Robust Low-Thrust Trajectories
<|reference_start|>Algorithmic Considerations for Effective Global Search of Robust Low-Thrust Trajectories: The growing interest in the cislunar domain over the past decade has led to an increasing demand for low-thrust missions to key orbits within this region. These low-thrust missions, typically characterized by long thrust arcs, are highly susceptible to operational disruptions such as unforeseen thruster outages or missed thrust events. Consequently, there is a critical need for efficient trajectory design frameworks which incorporate robustness against such anomalies. In this study, we utilize a robust trajectory design framework to explore the solution space for the Power and Propulsion Element (PPE) module to the Earth-Moon L2 Southern 9:2 Near Rectilinear Halo Orbit. We propose algorithmic enhancements to improve the global search for robust solutions, and present a comprehensive analysis of two approaches: a nonconditional approach which involves a purely random search for robust solutions versus a conditional approach which involves warm-starting the search for robust solutions using the non-robust solutions. Our results indicate that by using non-robust solutions as initial guesses for the robust solutions, it is possible to achieve significant improvements in both the rate of convergence and the robustness of the final solutions.<|reference_end|>
arxiv
@article{sinha2024algorithmic, title={Algorithmic Considerations for Effective Global Search of Robust Low-Thrust Trajectories}, author={Amlan Sinha and Ryne Beeson}, journal={arXiv preprint arXiv:2410.00297}, year={2024}, archivePrefix={arXiv}, eprint={2410.00297}, primaryClass={math.NA cs.NA} }
sinha2024algorithmic
arxiv-663889
2410.00299
GSPR: Multimodal Place Recognition Using 3D Gaussian Splatting for Autonomous Driving
<|reference_start|>GSPR: Multimodal Place Recognition Using 3D Gaussian Splatting for Autonomous Driving: Place recognition is a crucial module to ensure autonomous vehicles obtain usable localization information in GPS-denied environments. In recent years, multimodal place recognition methods have gained increasing attention due to their ability to overcome the weaknesses of unimodal sensor systems by leveraging complementary information from different modalities. However, challenges arise from the necessity of harmonizing data across modalities and exploiting the spatio-temporal correlations between them sufficiently. In this paper, we propose a 3D Gaussian Splatting-based multimodal place recognition neural network dubbed GSPR. It explicitly combines multi-view RGB images and LiDAR point clouds into a spatio-temporally unified scene representation with the proposed Multimodal Gaussian Splatting. A network composed of 3D graph convolution and transformer is designed to extract high-level spatio-temporal features and global descriptors from the Gaussian scenes for place recognition. We evaluate our method on the nuScenes dataset, and the experimental results demonstrate that our method can effectively leverage complementary strengths of both multi-view cameras and LiDAR, achieving SOTA place recognition performance while maintaining solid generalization ability. Our open-source code is available at https://github.com/QiZS-BIT/GSPR.<|reference_end|>
arxiv
@article{qi2024gspr:, title={GSPR: Multimodal Place Recognition Using 3D Gaussian Splatting for Autonomous Driving}, author={Zhangshuo Qi, Junyi Ma, Jingyi Xu, Zijie Zhou, Luqi Cheng, and Guangming Xiong}, journal={arXiv preprint arXiv:2410.00299}, year={2024}, archivePrefix={arXiv}, eprint={2410.00299}, primaryClass={cs.CV} }
qi2024gspr:
arxiv-663890
2410.00301
Network Science in Psychology
<|reference_start|>Network Science in Psychology: Social network analysis can answer research questions such as why or how individuals interact or form relationships and how those relationships impact other outcomes. Despite the breadth of methods available to address psychological research questions, social network analysis is not yet a standard practice in psychological research. To promote the use of social network analysis in psychological research, we present an overview of network methods, situating each method within the context of research studies and questions in psychology.<|reference_end|>
arxiv
@article{sweet2024network, title={Network Science in Psychology}, author={Tracy Sweet, Selena Wang}, journal={arXiv preprint arXiv:2410.00301}, year={2024}, archivePrefix={arXiv}, eprint={2410.00301}, primaryClass={cs.SI stat.AP stat.ME} }
sweet2024network
arxiv-663891
2410.00302
Bayesian Intention for Enhanced Human Robot Collaboration
<|reference_start|>Bayesian Intention for Enhanced Human Robot Collaboration: Predicting human intent is challenging yet essential to achieving seamless Human-Robot Collaboration (HRC). Many existing approaches fail to fully exploit the inherent relationships between objects, tasks, and the human model. Current methods for predicting human intent, such as Gaussian Mixture Models (GMMs) and Conditional Random Fields (CRFs), often lack interpretability due to their failure to account for causal relationships between variables. To address these challenges, in this paper, we developed a novel Bayesian Intention (BI) framework to predict human intent within a multi-modality information framework in HRC scenarios. This framework captures the complexity of intent prediction by modeling the correlations between human behavior conventions and scene data. Our framework leverages these inferred intent predictions to optimize the robot's response in real-time, enabling smoother and more intuitive collaboration. We demonstrate the effectiveness of our approach through a HRC task involving a UR5 robot, highlighting BI's capability for real-time human intent prediction and collision avoidance using a unique dataset we created. Our evaluations show that the multi-modality BI model predicts human intent within 2.69ms, with a 36% increase in precision, a 60% increase in F1 Score, and an 85% increase in accuracy compared to its best baseline method. The results underscore BI's potential to advance real-time human intent prediction and collision avoidance, making a significant contribution to the field of HRC.<|reference_end|>
arxiv
@article{hernandez-cruz2024bayesian, title={Bayesian Intention for Enhanced Human Robot Collaboration}, author={Vanessa Hernandez-Cruz, Xiaotong Zhang, Kamal Youcef-Toumi}, journal={arXiv preprint arXiv:2410.00302}, year={2024}, archivePrefix={arXiv}, eprint={2410.00302}, primaryClass={cs.RO} }
hernandez-cruz2024bayesian
arxiv-663892
2410.00306
A structure-preserving implicit exponential time differencing scheme for Maxwell-Amp`ere Nernst-Planck model
<|reference_start|>A structure-preserving implicit exponential time differencing scheme for Maxwell-Amp`ere Nernst-Planck model: The transport of charged particles, which can be described by the Maxwell-Ampere Nernst-Planck (MANP) framework, is essential in various applications including ion channels and semiconductors. We propose a decoupled structure-preserving numerical scheme for the MANP model in this work. The Nernst-Planck equations are treated by the implicit exponential time differencing method associated with the Slotboom transform to preserve the positivity of the concentrations. In order to be effective with the Fast Fourier Transform, additional diffusive terms are introduced into Nernst-Planck equations. Meanwhile, the correction is introduced in the Maxwell-Ampere equation to fulfill Gauss's law. The curl-free condition for electric displacement is realized by a local curl-free relaxation algorithm whose complexity is O(N). We present sufficient restrictions on the time and spatial steps to satisfy the positivity and energy dissipation law at a discrete level. Numerical experiments are conducted to validate the expected numerical accuracy and demonstrate the structure-preserving properties of the proposed method.<|reference_end|>
arxiv
@article{guo2024a, title={A structure-preserving implicit exponential time differencing scheme for Maxwell-Amp`ere Nernst-Planck model}, author={Yunzhuo Guo, Qian Yin, Zhengru Zhang}, journal={arXiv preprint arXiv:2410.00306}, year={2024}, archivePrefix={arXiv}, eprint={2410.00306}, primaryClass={math.NA cs.NA} }
guo2024a
arxiv-663893
2410.00307
RadGazeGen: Radiomics and Gaze-guided Medical Image Generation using Diffusion Models
<|reference_start|>RadGazeGen: Radiomics and Gaze-guided Medical Image Generation using Diffusion Models: In this work, we present RadGazeGen, a novel framework for integrating experts' eye gaze patterns and radiomic feature maps as controls to text-to-image diffusion models for high fidelity medical image generation. Despite the recent success of text-to-image diffusion models, text descriptions are often found to be inadequate and fail to convey detailed disease-specific information to these models to generate clinically accurate images. The anatomy, disease texture patterns, and location of the disease are extremely important to generate realistic images; moreover the fidelity of image generation can have significant implications in downstream tasks involving disease diagnosis or treatment repose assessment. Hence, there is a growing need to carefully define the controls used in diffusion models for medical image generation. Eye gaze patterns of radiologists are important visuo-cognitive information, indicative of subtle disease patterns and spatial location. Radiomic features further provide important subvisual cues regarding disease phenotype. In this work, we propose to use these gaze patterns in combination with standard radiomics descriptors, as controls, to generate anatomically correct and disease-aware medical images. RadGazeGen is evaluated for image generation quality and diversity on the REFLACX dataset. To demonstrate clinical applicability, we also show classification performance on the generated images from the CheXpert test set (n=500) and long-tailed learning performance on the MIMIC-CXR-LT test set (n=23550).<|reference_end|>
arxiv
@article{bhattacharya2024radgazegen:, title={RadGazeGen: Radiomics and Gaze-guided Medical Image Generation using Diffusion Models}, author={Moinak Bhattacharya, Gagandeep Singh, Shubham Jain, Prateek Prasanna}, journal={arXiv preprint arXiv:2410.00307}, year={2024}, archivePrefix={arXiv}, eprint={2410.00307}, primaryClass={cs.CV} }
bhattacharya2024radgazegen:
arxiv-663894
2410.00309
Ask, Pose, Unite: Scaling Data Acquisition for Close Interactions with Vision Language Models
<|reference_start|>Ask, Pose, Unite: Scaling Data Acquisition for Close Interactions with Vision Language Models: Social dynamics in close human interactions pose significant challenges for Human Mesh Estimation (HME), particularly due to the complexity of physical contacts and the scarcity of training data. Addressing these challenges, we introduce a novel data generation method that utilizes Large Vision Language Models (LVLMs) to annotate contact maps which guide test-time optimization to produce paired image and pseudo-ground truth meshes. This methodology not only alleviates the annotation burden but also enables the assembly of a comprehensive dataset specifically tailored for close interactions in HME. Our Ask Pose Unite (APU) dataset, comprising over 6.2k human mesh pairs in contact covering diverse interaction types, is curated from images depicting naturalistic person-to-person scenes. We empirically show that using our dataset to train a diffusion-based contact prior, used as guidance during optimization, improves mesh estimation on unseen interactions. Our work addresses longstanding challenges of data scarcity for close interactions in HME enhancing the field's capabilities of handling complex interaction scenarios.<|reference_end|>
arxiv
@article{bravo-sánchez2024ask,, title={Ask, Pose, Unite: Scaling Data Acquisition for Close Interactions with Vision Language Models}, author={Laura Bravo-S'anchez, Jaewoo Heo, Zhenzhen Weng, Kuan-Chieh Wang and Serena Yeung-Levy}, journal={arXiv preprint arXiv:2410.00309}, year={2024}, archivePrefix={arXiv}, eprint={2410.00309}, primaryClass={cs.CV cs.AI cs.LG} }
bravo-sánchez2024ask,
arxiv-663895
2410.00312
Contrastive Representation Learning for Predicting Solar Flares from Extremely Imbalanced Multivariate Time Series Data
<|reference_start|>Contrastive Representation Learning for Predicting Solar Flares from Extremely Imbalanced Multivariate Time Series Data: Major solar flares are abrupt surges in the Sun's magnetic flux, presenting significant risks to technological infrastructure. In view of this, effectively predicting major flares from solar active region magnetic field data through machine learning methods becomes highly important in space weather research. Magnetic field data can be represented in multivariate time series modality where the data displays an extreme class imbalance due to the rarity of major flare events. In time series classification-based flare prediction, the use of contrastive representation learning methods has been relatively limited. In this paper, we introduce CONTREX, a novel contrastive representation learning approach for multivariate time series data, addressing challenges of temporal dependencies and extreme class imbalance. Our method involves extracting dynamic features from the multivariate time series instances, deriving two extremes from positive and negative class feature vectors that provide maximum separation capability, and training a sequence representation embedding module with the original multivariate time series data guided by our novel contrastive reconstruction loss to generate embeddings aligned with the extreme points. These embeddings capture essential time series characteristics and enhance discriminative power. Our approach shows promising solar flare prediction results on the Space Weather Analytics for Solar Flares (SWAN-SF) multivariate time series benchmark dataset against baseline methods.<|reference_end|>
arxiv
@article{vural2024contrastive, title={Contrastive Representation Learning for Predicting Solar Flares from Extremely Imbalanced Multivariate Time Series Data}, author={Onur Vural, Shah Muhammad Hamdi, Soukaina Filali Boubrahimi}, journal={arXiv preprint arXiv:2410.00312}, year={2024}, archivePrefix={arXiv}, eprint={2410.00312}, primaryClass={astro-ph.SR cs.AI cs.LG} }
vural2024contrastive
arxiv-663896
2410.00313
Pre-Chirp-Domain Index Modulation for Full-Diversity Affine Frequency Division Multiplexing towards 6G
<|reference_start|>Pre-Chirp-Domain Index Modulation for Full-Diversity Affine Frequency Division Multiplexing towards 6G: Affine frequency division multiplexing (AFDM), tailored as a superior multicarrier technique utilizing chirp signals for high-mobility communications, is envisioned as a promising candidate for the sixth-generation (6G) wireless network. AFDM is based on the discrete affine Fourier transform (DAFT) with two adjustable parameters of the chirp signals, termed as the pre-chirp and post-chirp parameters, respectively. We show that the pre-chirp counterpart can be flexibly manipulated for additional degree-of-freedom (DoF). Therefore, this paper proposes a novel AFDM scheme with the pre-chirp index modulation (PIM) philosophy (AFDM-PIM), which can implicitly convey extra information bits through dynamic pre-chirp parameter assignment, thus enhancing both spectral and energy efficiency. Specifically, we first demonstrate that the subcarrier orthogonality is still maintained by applying distinct pre-chirp parameters to various subcarriers in the AFDM modulation process. Inspired by this property, each AFDM subcarrier is constituted with a unique pre-chirp signal according to the incoming bits. By such arrangement, extra binary bits can be embedded into the index patterns of pre-chirp parameter assignment without additional energy consumption. For performance analysis, we derive the asymptotically tight upper bounds on the average bit error rates (BERs) of the proposed schemes with maximum-likelihood (ML) detection, and validate that the proposed AFDM-PIM can achieve the optimal diversity order under doubly dispersive channels. Based on the derivations, we further propose an optimal pre-chirp alphabet design to enhance the BER performance via intelligent optimization algorithms. Simulations demonstrate that the proposed AFDM-PIM outperforms the classical benchmarks under doubly dispersive channel.<|reference_end|>
arxiv
@article{liu2024pre-chirp-domain, title={Pre-Chirp-Domain Index Modulation for Full-Diversity Affine Frequency Division Multiplexing towards 6G}, author={Guangyao Liu, Tianqi Mao, Zhenyu Xiao, Ruiqi Liu, Miaowen Wen}, journal={arXiv preprint arXiv:2410.00313}, year={2024}, archivePrefix={arXiv}, eprint={2410.00313}, primaryClass={cs.IT eess.SP math.IT} }
liu2024pre-chirp-domain
arxiv-663897
2410.00316
EmoKnob: Enhance Voice Cloning with Fine-Grained Emotion Control
<|reference_start|>EmoKnob: Enhance Voice Cloning with Fine-Grained Emotion Control: While recent advances in Text-to-Speech (TTS) technology produce natural and expressive speech, they lack the option for users to select emotion and control intensity. We propose EmoKnob, a framework that allows fine-grained emotion control in speech synthesis with few-shot demonstrative samples of arbitrary emotion. Our framework leverages the expressive speaker representation space made possible by recent advances in foundation voice cloning models. Based on the few-shot capability of our emotion control framework, we propose two methods to apply emotion control on emotions described by open-ended text, enabling an intuitive interface for controlling a diverse array of nuanced emotions. To facilitate a more systematic emotional speech synthesis field, we introduce a set of evaluation metrics designed to rigorously assess the faithfulness and recognizability of emotion control frameworks. Through objective and subjective evaluations, we show that our emotion control framework effectively embeds emotions into speech and surpasses emotion expressiveness of commercial TTS services.<|reference_end|>
arxiv
@article{chen2024emoknob:, title={EmoKnob: Enhance Voice Cloning with Fine-Grained Emotion Control}, author={Haozhe Chen, Run Chen, Julia Hirschberg}, journal={arXiv preprint arXiv:2410.00316}, year={2024}, archivePrefix={arXiv}, eprint={2410.00316}, primaryClass={cs.CL cs.AI cs.HC cs.SD eess.AS} }
chen2024emoknob:
arxiv-663898
2410.00318
Probing Mechanical Reasoning in Large Vision Language Models
<|reference_start|>Probing Mechanical Reasoning in Large Vision Language Models: Mechanical reasoning is a fundamental ability that sets human intelligence apart from other animal intelligence. Mechanical reasoning allows us to design tools, build bridges and canals, and construct houses which set the foundation of human civilization. Embedding machines with such ability is an important step towards building human-level artificial intelligence. Recently, Li et al. built CogDevelop2K, a data-intensive cognitive experiment benchmark for assaying the developmental trajectory of machine intelligence (Li et al., 2024). Here, to investigate mechanical reasoning in Vision Language Models, we leverage the MechBench of CogDevelop2K, which contains approximately 150 cognitive experiments, to test understanding of mechanical system stability, gears and pulley systems, seesaw-like systems and leverage principle, inertia and motion, and other fluid-related systems in Large Vision Language Models. We observe diverse yet consistent behaviors over these aspects in VLMs.<|reference_end|>
arxiv
@article{sun2024probing, title={Probing Mechanical Reasoning in Large Vision Language Models}, author={Haoran Sun, Qingying Gao, Haiyun Lyu, Dezhi Luo, Hokin Deng, Yijiang Li}, journal={arXiv preprint arXiv:2410.00318}, year={2024}, archivePrefix={arXiv}, eprint={2410.00318}, primaryClass={cs.AI q-bio.NC} }
sun2024probing
arxiv-663899
2410.00320
PointAD: Comprehending 3D Anomalies from Points and Pixels for Zero-shot 3D Anomaly Detection
<|reference_start|>PointAD: Comprehending 3D Anomalies from Points and Pixels for Zero-shot 3D Anomaly Detection: Zero-shot (ZS) 3D anomaly detection is a crucial yet unexplored field that addresses scenarios where target 3D training samples are unavailable due to practical concerns like privacy protection. This paper introduces PointAD, a novel approach that transfers the strong generalization capabilities of CLIP for recognizing 3D anomalies on unseen objects. PointAD provides a unified framework to comprehend 3D anomalies from both points and pixels. In this framework, PointAD renders 3D anomalies into multiple 2D renderings and projects them back into 3D space. To capture the generic anomaly semantics into PointAD, we propose hybrid representation learning that optimizes the learnable text prompts from 3D and 2D through auxiliary point clouds. The collaboration optimization between point and pixel representations jointly facilitates our model to grasp underlying 3D anomaly patterns, contributing to detecting and segmenting anomalies of unseen diverse 3D objects. Through the alignment of 3D and 2D space, our model can directly integrate RGB information, further enhancing the understanding of 3D anomalies in a plug-and-play manner. Extensive experiments show the superiority of PointAD in ZS 3D anomaly detection across diverse unseen objects.<|reference_end|>
arxiv
@article{zhou2024pointad:, title={PointAD: Comprehending 3D Anomalies from Points and Pixels for Zero-shot 3D Anomaly Detection}, author={Qihang Zhou, Jiangtao Yan, Shibo He, Wenchao Meng, Jiming Chen}, journal={arXiv preprint arXiv:2410.00320}, year={2024}, archivePrefix={arXiv}, eprint={2410.00320}, primaryClass={cs.CV cs.CL} }
zhou2024pointad:
arxiv-663900
2410.00321
A Cat Is A Cat (Not A Dog!): Unraveling Information Mix-ups in Text-to-Image Encoders through Causal Analysis and Embedding Optimization
<|reference_start|>A Cat Is A Cat (Not A Dog!): Unraveling Information Mix-ups in Text-to-Image Encoders through Causal Analysis and Embedding Optimization: This paper analyzes the impact of causal manner in the text encoder of text-to-image (T2I) diffusion models, which can lead to information bias and loss. Previous works have focused on addressing the issues through the denoising process. However, there is no research discussing how text embedding contributes to T2I models, especially when generating more than one object. In this paper, we share a comprehensive analysis of text embedding: i) how text embedding contributes to the generated images and ii) why information gets lost and biases towards the first-mentioned object. Accordingly, we propose a simple but effective text embedding balance optimization method, which is training-free, with an improvement of 90.05% on information balance in stable diffusion. Furthermore, we propose a new automatic evaluation metric that quantifies information loss more accurately than existing methods, achieving 81% concordance with human assessments. This metric effectively measures the presence and accuracy of objects, addressing the limitations of current distribution scores like CLIP's text-image similarities.<|reference_end|>
arxiv
@article{chen2024a, title={A Cat Is A Cat (Not A Dog!): Unraveling Information Mix-ups in Text-to-Image Encoders through Causal Analysis and Embedding Optimization}, author={Chieh-Yun Chen, Chiang Tseng, Li-Wu Tsao, Hong-Han Shuai}, journal={arXiv preprint arXiv:2410.00321}, year={2024}, archivePrefix={arXiv}, eprint={2410.00321}, primaryClass={cs.CV} }
chen2024a