@article{GuerraLondono_CastanoLondono_AlzateAnzola_MarquezViloria_VelasquezVelez_2020,
  title        = {Análisis de desempeño de capas de CNN para arquitecturas heterogéneas basadas en FPGAs usando HLS},
  volume       = {26},
  url          = {https://revistas.udistrital.edu.co/index.php/reving/article/view/15634},
  DOI          = {10.14483/23448393.15634},
  abstractNote = {Context: Convolutional neural networks (CNNs) are currently used in a wide range of artificial intelligence applications. In many cases, these applications require the networks to run in real time on embedded devices, so there is interest in achieving high performance with low power consumption. CNNs perform operations between the input data and the network weights, with the particularity that most of these operations are independent of one another. Thus, the inherent parallelism of Field Programmable Gate Arrays (FPGAs) can be exploited to perform multiple operations in parallel while maintaining the good performance per watt that characterizes these devices. This paper evaluates the convolution algorithm of a convolutional neural network layer by exploring parallelization directives in VIVADO HLS, with the aim of assessing the performance of the algorithm under different optimization directives. Method: The methodology consists of a design space exploration of a convolutional neural network layer implementation using VIVADO HLS. The FPGA output was verified by comparing it with the same convolution algorithm implemented in MATLAB. A layer of the commercial Xilinx DNNK was used as a reference for performance measurements of the different implementations obtained during the design space exploration. Multiple combinations of optimization directives are used, such as pipeline, array partition, and unroll. Results: This paper presents the results of a reference implementation (without optimization directives) of the convolution algorithm in terms of latency and the FPGA hardware resources used. These results are compared with implementations of the algorithm that include different combinations of two optimization directives (pipeline and array partition). Conclusions: This work explores the design space of a convolution algorithm for a convolutional neural network layer on FPGAs. The exploration covers the effect of data transfer between DDR memory and the on-chip memory of the FPGA, as well as the effect of the VIVADO HLS optimization directives on the different loops of the algorithm. Acknowledgements: This work was supported in part by the Automática, Electrónica y Ciencias Computacionales Group (COL0053581) - Instituto Tecnológico Metropolitano and in part by the Sistemas Embebidos e Inteligencia Computacional Group (COL0010717) - Universidad de Antioquia under Grant P17224.},
  number       = {1},
  journal      = {Ingeniería},
  author       = {Guerra Londono, Mateo and Castano Londono, Luis Fernando and Alzate Anzola, Cristian Camilo and Marquez Viloria, David Andres and Velasquez Velez, Ricardo Andres},
  year         = {2020},
  month        = {Dec.},
  pages        = {62--76}
}