From 7d6ff8e606ae457dbcf7f7c0921cf2a028225cf2 Mon Sep 17 00:00:00 2001 From: Louis Date: Wed, 22 May 2024 14:04:04 +0100 Subject: [PATCH 1/5] initial migrate --- src/components/article/ArticleContent.astro | 3 +- src/components/head/BaseHead.astro | 2 +- ...hts-from-linaro-s-windows-on-arm-group.mdx | 49 +++ ...bit-linux-bright-future-or-end-of-life.mdx | 18 + ...-development-platform-bootprint-x2-kit.mdx | 44 ++ .../a-year-in-the-life-of-the-stg-group.mdx | 160 +++++++ ...format-conversion-using-opengl-shaders.mdx | 349 +++++++++++++++ ...rect-ta-load-address-with-aslr-enabled.mdx | 26 ++ ...-qualcomm-reference-boards-rb5-and-rb3.mdx | 43 ++ ...mainline-kernel-on-form-factor-devices.mdx | 31 ++ ...nsfers-cmsis-pack-technology-to-linaro.mdx | 43 ++ ...d-reporting-of-performance-regressions.mdx | 35 ++ ...ucing-linaro-virtual-connect-fall-2021.mdx | 59 +++ .../blogs/bfq-saved-me-from-thrashing.mdx | 36 ++ ...ns-for-multi-actuator-sata-hard-drives.mdx | 84 ++++ src/content/blogs/can-we-make-ai-super.mdx | 83 ++++ ...nd-performance-results-in-a-board-farm.mdx | 234 ++++++++++ ...pc-applications-while-working-remotely.mdx | 126 ++++++ .../debugging-memory-tagging-with-lldb-13.mdx | 370 ++++++++++++++++ ...gging-while-you-sleep-using-linaro-ddt.mdx | 64 +++ .../blogs/device-tree-future-improvements.mdx | 72 +++ .../blogs/dma-buf-heap-transition-in-aosp.mdx | 68 +++ .../blogs/dragonboard-845c-in-aosp.mdx | 48 ++ .../enabling-uefi-secure-boot-on-u-boot.mdx | 288 ++++++++++++ ...rmance-through-enhanced-kernel-testing.mdx | 78 ++++ .../force-idle-when-a-cpu-is-overheating.mdx | 134 ++++++ ...o-expect-in-future-linux-distributions.mdx | 188 ++++++++ ...e-systems-the-new-open-source-frontier.mdx | 38 ++ ...ing-hpc-reflection-and-forward-looking.mdx | 105 +++++ ...asia-2019-and-linaros-arm-hpc-workshop.mdx | 66 +++ ...-technologies-and-open-source-software.mdx | 48 ++ .../highlights-from-lund-linux-con-2019.mdx | 33 ++ .../history-of-the-interconnect-framework.mdx | 58 +++ ...-over-a-million-linux-kernels-per-year.mdx | 127 ++++++ .../how-to-build-flang-on-windows-on-arm.mdx | 90 ++++ ...ed-platform-module-in-qemu-with-u-boot.mdx | 404 +++++++++++++++++ ...to-set-up-vs-code-for-llvm-development.mdx | 86 ++++ ...up-windows-on-arm-for-llvm-development.mdx | 187 ++++++++ ...lldb-to-debug-sve-enabled-applications.mdx | 143 ++++++ .../improving-audio-latency-in-android.mdx | 39 ++ ...rated-video-decoding-with-v4l2-in-aosp.mdx | 63 +++ ...gement-for-production-quality-services.mdx | 173 ++++++++ ...mm-robotics-platform-rb5-now-available.mdx | 27 ++ ...nline-linux-kernel-on-qualcomm-devices.mdx | 373 ++++++++++++++++ ...et-s-talk-about-homomorphic-encryption.mdx | 45 ++ .../blogs/linaro-a-decade-of-development.mdx | 64 +++ .../blogs/linaro-and-the-linux-kernel.mdx | 121 ++++++ ...-testing-and-automation-to-fosdem-2020.mdx | 30 ++ ...linaro-connect-budapest-2020-cancelled.mdx | 22 + ...ity-ci-officially-supporting-openeuler.mdx | 62 +++ ...tions-to-the-5-17-linux-kernel-release.mdx | 80 ++++ ...tions-to-the-5-18-linux-kernel-release.mdx | 46 ++ ...tions-to-the-linux-kernel-5-16-release.mdx | 61 +++ ...eveloper-cloud-kubernetes-as-a-service.mdx | 69 +++ ...an-impact-in-linux-kernel-5-13-release.mdx | 62 +++ ...aro-developers-top-5-12-kernel-release.mdx | 63 +++ ...zed-information-hub-for-arm-developers.mdx | 48 ++ ...ng-highlights-april-2020-new-version-2.mdx | 152 +++++++ ...o-engineering-highlights-december-2020.mdx | 410 ++++++++++++++++++ ...engineering-highlights-for-august-2020.mdx | 133 ++++++ 
...linaro-engineering-highlights-may-2020.mdx | 329 ++++++++++++++ ...e-first-kernel-to-be-released-on-arm64.mdx | 87 ++++ ...tion-and-region-profiling-capabilities.mdx | 45 ++ ...tive-linux-kernel-contributors-in-2022.mdx | 93 ++++ src/content/blogs/linaro-in-openstack.mdx | 58 +++ ...butors-to-the-6-0-linux-kernel-release.mdx | 85 ++++ ...the-first-parallel-debugger-for-python.mdx | 87 ++++ ...releases-ledge-reference-platform-v0-2.mdx | 60 +++ ...rvisor-agnostic-vhost-user-i2c-backend.mdx | 133 ++++++ ...livestream-event-of-technical-sessions.mdx | 28 ++ ...-virtualization-at-embedded-world-2022.mdx | 38 ++ ...nd-the-mystery-of-the-non-address-bits.mdx | 354 +++++++++++++++ src/content/blogs/lvc20-wrap-up.mdx | 196 +++++++++ src/content/blogs/many-uses-of-qemu.mdx | 72 +++ ...oot-becomes-a-linaro-community-project.mdx | 21 + ...int-devices-upstreamed-to-linux-kernel.mdx | 132 ++++++ ...support-gets-added-to-the-linux-kernel.mdx | 222 ++++++++++ ...upcoming-linaro-connect-san-diego-2019.mdx | 30 ++ ...iprocessing-and-openamp-messaging-demo.mdx | 30 ++ ...rk-latency-with-tsn-on-virtual-machine.mdx | 135 ++++++ ...a-result-of-reworking-the-load-balance.mdx | 77 ++++ ...rust-sources-for-linux-kernel-keyrings.mdx | 65 +++ .../blogs/next-qemu-development-cycle.mdx | 105 +++++ .../blogs/op-tee-and-the-need-for-ff-a.mdx | 127 ++++++ .../open-on-chip-debugger-ocd-at-linaro.mdx | 41 ++ ...amp-becomes-a-linaro-community-project.mdx | 23 + ...ed-yocto-project-for-kernel-developers.mdx | 231 ++++++++++ ...low-convolution-performance-on-aarch64.mdx | 64 +++ ...-linux-tools-into-morello-architecture.mdx | 54 +++ .../porting-linux-to-aarch64-laptops.mdx | 40 ++ ...inaro-forge-and-a-performance-surprise.mdx | 43 ++ .../protected-uefi-variables-with-u-boot.mdx | 223 ++++++++++ .../protecting-security-critical-firmware.mdx | 113 +++++ .../blogs/python-and-go-in-the-arm-world.mdx | 63 +++ ...qemu-8-2-and-linaro-s-maintainer-story.mdx | 199 +++++++++ ...opments-in-the-open-cmsis-pack-project.mdx | 45 ++ ...machine-outliner-on-32-bit-arm-targets.mdx | 109 +++++ .../reimagining-linaro-virtual-events.mdx | 45 ++ ...-n-v-join-the-trusted-firmware-project.mdx | 23 + ...curing-a-device-with-trusted-substrate.mdx | 259 +++++++++++ .../blogs/security-and-the-zephyr-project.mdx | 53 +++ ...tting-up-tensorflow-for-windows-on-arm.mdx | 90 ++++ ...ling-now-a-reality-in-the-linux-kernel.mdx | 45 ++ ...iple-devices-with-the-same-aosp-images.mdx | 70 +++ .../the-challenges-of-abstracting-virtio.mdx | 57 +++ src/content/blogs/the-end-of-an-era.mdx | 61 +++ .../the-evolution-of-the-qemu-translator.mdx | 131 ++++++ .../blogs/the-kisscache-caching-server.mdx | 100 +++++ ...ft-towards-hpc-ai-and-why-it-is-needed.mdx | 51 +++ .../thermal-notifications-with-netlink.mdx | 114 +++++ .../tuxpub-the-serverless-file-server.mdx | 67 +++ ...date-on-hikey-hikey960-efforts-in-aosp.mdx | 32 ++ ...-camera-support-for-qualcomm-platforms.mdx | 79 ++++ ...omm-snapdragon-8-gen-2-mobile-platform.mdx | 103 +++++ ...aming-support-for-qualcomm-pcie-modems.mdx | 116 +++++ ...ing-energy-model-to-stay-in-tdp-budget.mdx | 106 +++++ ...ion-to-detect-false-cache-line-sharing.mdx | 90 ++++ src/content/blogs/virtio-work.mdx | 183 ++++++++ src/content/blogs/what-is-linaro.mdx | 27 ++ ...-embedded-linux-conference-europe-2019.mdx | 50 +++ ...he-possibilities-of-native-development.mdx | 46 ++ ...m-now-supported-in-python-3-11-release.mdx | 143 ++++++ 122 files changed, 12259 insertions(+), 2 deletions(-) create mode 100644 
src/content/blogs/2022-highlights-from-linaro-s-windows-on-arm-group.mdx create mode 100644 src/content/blogs/32-bit-linux-bright-future-or-end-of-life.mdx create mode 100644 src/content/blogs/96boards-and-horizon-robotics-jointly-launch-aiot-development-platform-bootprint-x2-kit.mdx create mode 100644 src/content/blogs/a-year-in-the-life-of-the-stg-group.mdx create mode 100644 src/content/blogs/accelerating-libcamera-qcam-format-conversion-using-opengl-shaders.mdx create mode 100644 src/content/blogs/add-support-to-retrieve-correct-ta-load-address-with-aslr-enabled.mdx create mode 100644 src/content/blogs/android-13-now-available-on-qualcomm-reference-boards-rb5-and-rb3.mdx create mode 100644 src/content/blogs/aosp-on-pixel3-pocof1-running-aosp-with-mainline-kernel-on-form-factor-devices.mdx create mode 100644 src/content/blogs/arm-transfers-cmsis-pack-technology-to-linaro.mdx create mode 100644 src/content/blogs/automatic-detection-and-reporting-of-performance-regressions.mdx create mode 100644 src/content/blogs/automotive-hyperscalers-testing-on-arm-and-more-introducing-linaro-virtual-connect-fall-2021.mdx create mode 100644 src/content/blogs/bfq-saved-me-from-thrashing.mdx create mode 100644 src/content/blogs/budget-fair-queueing-bfq-linux-io-scheduler-optimizations-for-multi-actuator-sata-hard-drives.mdx create mode 100644 src/content/blogs/can-we-make-ai-super.mdx create mode 100644 src/content/blogs/challenges-of-stabilising-power-and-performance-results-in-a-board-farm.mdx create mode 100644 src/content/blogs/debugging-and-profiling-hpc-applications-while-working-remotely.mdx create mode 100644 src/content/blogs/debugging-memory-tagging-with-lldb-13.mdx create mode 100644 src/content/blogs/debugging-while-you-sleep-using-linaro-ddt.mdx create mode 100644 src/content/blogs/device-tree-future-improvements.mdx create mode 100644 src/content/blogs/dma-buf-heap-transition-in-aosp.mdx create mode 100644 src/content/blogs/dragonboard-845c-in-aosp.mdx create mode 100644 src/content/blogs/enabling-uefi-secure-boot-on-u-boot.mdx create mode 100644 src/content/blogs/ensuring-optimal-performance-through-enhanced-kernel-testing.mdx create mode 100644 src/content/blogs/force-idle-when-a-cpu-is-overheating.mdx create mode 100644 src/content/blogs/glibc-improvements-and-what-to-expect-in-future-linux-distributions.mdx create mode 100644 src/content/blogs/heterogeneous-multicore-systems-the-new-open-source-frontier.mdx create mode 100644 src/content/blogs/high-performance-computing-hpc-reflection-and-forward-looking.mdx create mode 100644 src/content/blogs/highlights-from-hpc-asia-2019-and-linaros-arm-hpc-workshop.mdx create mode 100644 src/content/blogs/highlights-from-linaro-connect-on-qualcomm-technologies-and-open-source-software.mdx create mode 100644 src/content/blogs/highlights-from-lund-linux-con-2019.mdx create mode 100644 src/content/blogs/history-of-the-interconnect-framework.mdx create mode 100644 src/content/blogs/how-linaro-builds-boots-and-tests-over-a-million-linux-kernels-per-year.mdx create mode 100644 src/content/blogs/how-to-build-flang-on-windows-on-arm.mdx create mode 100644 src/content/blogs/how-to-emulate-trusted-platform-module-in-qemu-with-u-boot.mdx create mode 100644 src/content/blogs/how-to-set-up-vs-code-for-llvm-development.mdx create mode 100644 src/content/blogs/how-to-set-up-windows-on-arm-for-llvm-development.mdx create mode 100644 src/content/blogs/how-to-use-lldb-to-debug-sve-enabled-applications.mdx create mode 100644 
src/content/blogs/improving-audio-latency-in-android.mdx create mode 100644 src/content/blogs/integrating-accelerated-video-decoding-with-v4l2-in-aosp.mdx create mode 100644 src/content/blogs/io-bandwidth-management-for-production-quality-services.mdx create mode 100644 src/content/blogs/latest-support-for-debian-openembedded-releases-for-qualcomm-robotics-platform-rb5-now-available.mdx create mode 100644 src/content/blogs/let-s-boot-the-mainline-linux-kernel-on-qualcomm-devices.mdx create mode 100644 src/content/blogs/let-s-talk-about-homomorphic-encryption.mdx create mode 100644 src/content/blogs/linaro-a-decade-of-development.mdx create mode 100644 src/content/blogs/linaro-and-the-linux-kernel.mdx create mode 100644 src/content/blogs/linaro-brings-testing-and-automation-to-fosdem-2020.mdx create mode 100644 src/content/blogs/linaro-connect-budapest-2020-cancelled.mdx create mode 100644 src/content/blogs/linaro-contributes-to-the-openstack-community-ci-officially-supporting-openeuler.mdx create mode 100644 src/content/blogs/linaro-contributions-to-the-5-17-linux-kernel-release.mdx create mode 100644 src/content/blogs/linaro-contributions-to-the-5-18-linux-kernel-release.mdx create mode 100644 src/content/blogs/linaro-contributions-to-the-linux-kernel-5-16-release.mdx create mode 100644 src/content/blogs/linaro-developer-cloud-kubernetes-as-a-service.mdx create mode 100644 src/content/blogs/linaro-developers-make-an-impact-in-linux-kernel-5-13-release.mdx create mode 100644 src/content/blogs/linaro-developers-top-5-12-kernel-release.mdx create mode 100644 src/content/blogs/linaro-ecosystem-dashboard-a-centralized-information-hub-for-arm-developers.mdx create mode 100644 src/content/blogs/linaro-engineering-highlights-april-2020-new-version-2.mdx create mode 100644 src/content/blogs/linaro-engineering-highlights-december-2020.mdx create mode 100644 src/content/blogs/linaro-engineering-highlights-for-august-2020.mdx create mode 100644 src/content/blogs/linaro-engineering-highlights-may-2020.mdx create mode 100644 src/content/blogs/linaro-featured-top-in-5-19-linux-kernel-release-the-first-kernel-to-be-released-on-arm64.mdx create mode 100644 src/content/blogs/linaro-forge-19-1-introducing-forge-ultimate-edition-and-region-profiling-capabilities.mdx create mode 100644 src/content/blogs/linaro-high-up-the-list-for-most-active-linux-kernel-contributors-in-2022.mdx create mode 100644 src/content/blogs/linaro-in-openstack.mdx create mode 100644 src/content/blogs/linaro-in-top-five-for-most-active-contributors-to-the-6-0-linux-kernel-release.mdx create mode 100644 src/content/blogs/linaro-introduces-the-first-parallel-debugger-for-python.mdx create mode 100644 src/content/blogs/linaro-releases-ledge-reference-platform-v0-2.mdx create mode 100644 src/content/blogs/linaro-s-rust-based-hypervisor-agnostic-vhost-user-i2c-backend.mdx create mode 100644 src/content/blogs/linaro-tech-days-a-livestream-event-of-technical-sessions.mdx create mode 100644 src/content/blogs/linaro-to-present-on-embedded-ai-and-virtualization-at-embedded-world-2022.mdx create mode 100644 src/content/blogs/lldb-15-and-the-mystery-of-the-non-address-bits.mdx create mode 100644 src/content/blogs/lvc20-wrap-up.mdx create mode 100644 src/content/blogs/many-uses-of-qemu.mdx create mode 100644 src/content/blogs/mcuboot-becomes-a-linaro-community-project.mdx create mode 100644 src/content/blogs/mhi-bus-for-endpoint-devices-upstreamed-to-linux-kernel.mdx create mode 100644 
src/content/blogs/mhi-bus-support-gets-added-to-the-linux-kernel.mdx create mode 100644 src/content/blogs/microsoft-to-talk-iot-security-with-azure-sphere-at-the-upcoming-linaro-connect-san-diego-2019.mdx create mode 100644 src/content/blogs/multiprocessing-and-openamp-messaging-demo.mdx create mode 100644 src/content/blogs/network-latency-with-tsn-on-virtual-machine.mdx create mode 100644 src/content/blogs/network-throughput-performance-improves-as-a-result-of-reworking-the-load-balance.mdx create mode 100644 src/content/blogs/new-trust-sources-for-linux-kernel-keyrings.mdx create mode 100644 src/content/blogs/next-qemu-development-cycle.mdx create mode 100644 src/content/blogs/op-tee-and-the-need-for-ff-a.mdx create mode 100644 src/content/blogs/open-on-chip-debugger-ocd-at-linaro.mdx create mode 100644 src/content/blogs/openamp-becomes-a-linaro-community-project.mdx create mode 100644 src/content/blogs/openembedded-yocto-project-for-kernel-developers.mdx create mode 100644 src/content/blogs/optimizing-tensorflow-convolution-performance-on-aarch64.mdx create mode 100644 src/content/blogs/porting-common-linux-tools-into-morello-architecture.mdx create mode 100644 src/content/blogs/porting-linux-to-aarch64-laptops.mdx create mode 100644 src/content/blogs/profiling-python-and-compiled-code-with-linaro-forge-and-a-performance-surprise.mdx create mode 100644 src/content/blogs/protected-uefi-variables-with-u-boot.mdx create mode 100644 src/content/blogs/protecting-security-critical-firmware.mdx create mode 100644 src/content/blogs/python-and-go-in-the-arm-world.mdx create mode 100644 src/content/blogs/qemu-8-2-and-linaro-s-maintainer-story.mdx create mode 100644 src/content/blogs/recent-developments-in-the-open-cmsis-pack-project.mdx create mode 100644 src/content/blogs/reducing-code-size-with-llvm-machine-outliner-on-32-bit-arm-targets.mdx create mode 100644 src/content/blogs/reimagining-linaro-virtual-events.mdx create mode 100644 src/content/blogs/renesas-electronics-and-nxp-semiconductors-n-v-join-the-trusted-firmware-project.mdx create mode 100644 src/content/blogs/securing-a-device-with-trusted-substrate.mdx create mode 100644 src/content/blogs/security-and-the-zephyr-project.mdx create mode 100644 src/content/blogs/setting-up-tensorflow-for-windows-on-arm.mdx create mode 100644 src/content/blogs/standard-temperature-tooling-now-a-reality-in-the-linux-kernel.mdx create mode 100644 src/content/blogs/supporting-multiple-devices-with-the-same-aosp-images.mdx create mode 100644 src/content/blogs/the-challenges-of-abstracting-virtio.mdx create mode 100644 src/content/blogs/the-end-of-an-era.mdx create mode 100644 src/content/blogs/the-evolution-of-the-qemu-translator.mdx create mode 100644 src/content/blogs/the-kisscache-caching-server.mdx create mode 100644 src/content/blogs/the-shift-towards-hpc-ai-and-why-it-is-needed.mdx create mode 100644 src/content/blogs/thermal-notifications-with-netlink.mdx create mode 100644 src/content/blogs/tuxpub-the-serverless-file-server.mdx create mode 100644 src/content/blogs/update-on-hikey-hikey960-efforts-in-aosp.mdx create mode 100644 src/content/blogs/upstream-camera-support-for-qualcomm-platforms.mdx create mode 100644 src/content/blogs/upstream-linux-support-now-available-for-the-the-qualcomm-snapdragon-8-gen-2-mobile-platform.mdx create mode 100644 src/content/blogs/upstreaming-support-for-qualcomm-pcie-modems.mdx create mode 100644 src/content/blogs/using-energy-model-to-stay-in-tdp-budget.mdx create mode 100644 
src/content/blogs/using-the-arm-statistical-profiling-extension-to-detect-false-cache-line-sharing.mdx create mode 100644 src/content/blogs/virtio-work.mdx create mode 100644 src/content/blogs/what-is-linaro.mdx create mode 100644 src/content/blogs/what-to-expect-from-linaro-at-the-embedded-linux-conference-europe-2019.mdx create mode 100644 src/content/blogs/windows-on-arm-and-the-possibilities-of-native-development.mdx create mode 100644 src/content/blogs/windows-on-arm-now-supported-in-python-3-11-release.mdx diff --git a/src/components/article/ArticleContent.astro b/src/components/article/ArticleContent.astro index d3806da..bd449cd 100644 --- a/src/components/article/ArticleContent.astro +++ b/src/components/article/ArticleContent.astro @@ -19,7 +19,8 @@ const { Content, image } = Astro.props; effects={["grayscale"]} />
diff --git a/src/components/head/BaseHead.astro b/src/components/head/BaseHead.astro
index cadcd3c..a6ca3b6 100644
--- a/src/components/head/BaseHead.astro
+++ b/src/components/head/BaseHead.astro
@@ -23,7 +23,7 @@ const social_image = "";
-
+
+ Graphics support --->
+   Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)
+```
+
+The Panfrost driver debuted in the Mesa 19.1 release, but I recommend using the latest Mesa version to get more complete OpenGL support. In my case, on the RockPi4B platform, the kernel version is v5.8.11 and the Mesa version is 20.2.
+
+```
+$ uname -a
+Linux manjaro 5.8.11-00001-g9a8f115558ca #2 SMP PREEMPT Thu Sep 24 11:12:31 CST
+2020 aarch64 GNU/Linux
+
+$ glxinfo -B
+name of display: :0.0
+display: :0 screen: 0
+direct rendering: Yes
+Extended renderer info (GLX_MESA_query_renderer):
+    Vendor: Panfrost (0xffffffff)
+    Device: Mali T860 (Panfrost) (0xffffffff)
+    Version: 20.2.0
+    Accelerated: yes
+    Video memory: 3806MB
+    Unified memory: yes
+    Preferred profile: compat (0x2)
+    Max core profile version: 0.0
+    Max compat profile version: 2.1
+    Max GLES1 profile version: 1.1
+    Max GLES[23] profile version: 3.0
+OpenGL vendor string: Panfrost
+OpenGL renderer string: Mali T860 (Panfrost)
+OpenGL version string: 2.1 Mesa 20.2.0-devel (git-14a12b771d)
+OpenGL shading language version string: 1.20
+
+OpenGL ES profile version string: OpenGL ES 3.0 Mesa 20.2.0-devel (git-14a12b771d)
+OpenGL ES profile shading language version string: OpenGL ES GLSL ES 3.00
+```
+
+## OpenGL components
+
+To enable OpenGL support in “qcam”, three main Qt OpenGL components are used to handle the format conversion and frame rendering: QOpenGLWidget, QOpenGLFunctions and QOpenGLShaderProgram.
+The new class inherits from QOpenGLWidget and QOpenGLFunctions.
+
+```
+class ViewFinderGL : public QOpenGLWidget,
+                     public ViewFinder,
+                     protected QOpenGLFunctions
+```
+
+This new class reimplements the three virtual functions provided by QOpenGLWidget: initializeGL(), resizeGL() and paintGL().
+**initializeGL()**: Sets up the OpenGL resources and state.
+**resizeGL()**: Sets up the OpenGL viewport, projection, etc.
+**paintGL()**: Renders the OpenGL scene.
+
+In the **initializeGL()** function, we initialize OpenGL function resolution for the current context.
+
+```
+void ViewFinderGL::initializeGL()
+{
+	initializeOpenGLFunctions();
+```
+
+We then create and allocate the vertex buffer for the 2D image.
+
+```
+	static const GLfloat coordinates[2][4][2]{
+		{
+			/* Vertex coordinates */
+			{ -1.0f, -1.0f },
+			{ -1.0f, +1.0f },
+			{ +1.0f, +1.0f },
+			{ +1.0f, -1.0f },
+		},
+		{
+			/* Texture coordinates */
+			{ 0.0f, 1.0f },
+			{ 0.0f, 0.0f },
+			{ 1.0f, 0.0f },
+			{ 1.0f, 1.0f },
+		},
+	};
+
+	vertexBuffer_.create();
+	vertexBuffer_.bind();
+	vertexBuffer_.allocate(coordinates, sizeof(coordinates));
+```
+
+The vertex and texture coordinates are laid out as follows:
+
+```
+(-1.0f, 1.0f)  +----------------+ (1.0f, 1.0f)
+               |                |
+               |                |
+               |                |
+               |                |
+(-1.0f, -1.0f) +----------------+ (1.0f, -1.0f)
+```
+
+*The vertex coordinates (see reference 4)*
+
+```
+(0.0f, 0.0f) +----------------+ (1.0f, 0.0f)
+             |                |
+             |                |
+             |                |
+             |                |
+(0.0f, 1.0f) +----------------+ (1.0f, 1.0f)
+```
+
+*The texture coordinates (see reference 5)*
+
+That is, the vertex coordinates map to the texture coordinates as follows:
+
+```
+vertex coordinates <--> texture coordinates
+  (-1.0f, -1.0f) <--> (0.0f, 1.0f) [lower left]
+  ( 1.0f, -1.0f) <--> (1.0f, 1.0f) [lower right]
+  ( 1.0f,  1.0f) <--> (1.0f, 0.0f) [top right]
+  (-1.0f,  1.0f) <--> (0.0f, 0.0f) [top left]
+```
+
+Next we create the vertex shader, which sets the clip-space output position of the current vertex and passes the texture coordinate through.
+
+```
+attribute vec4 vertexIn;
+attribute vec2 textureIn;
+varying vec2 textureOut;
+
+void main(void)
+{
+	gl_Position = vertexIn;
+	textureOut = textureIn;
+}
+```
+
+**resizeGL()** resizes the OpenGL viewport whenever the widget is resized.
+
+```
+void ViewFinderGL::resizeGL(int w, int h)
+{
+	glViewport(0, 0, w, h);
+}
+```
+
+**paintGL()** has two parts. One completes the initialization of the fragment shader and the other does the actual rendering. The fragment shader initialization is postponed until here because initializeGL() and resizeGL() are called when the ViewFinderGL is constructed, and the camera configuration has not been generated at that stage. Once the camera device has been opened, the camera configuration is generated and the pixel format is set, so we can select and create the specific fragment shader accordingly.
+
+```
+bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format)
+{
+	bool ret = true;
+	switch (format) {
+	case libcamera::formats::NV12:
+		horzSubSample_ = 2;
+		vertSubSample_ = 2;
+		vertexShaderSrc_ = ":NV_vertex_shader.glsl";
+		fragmentShaderSrc_ = ":NV_2_planes_UV_f.glsl";
+		break;
+	case libcamera::formats::NV21:
+		horzSubSample_ = 2;
+		vertSubSample_ = 2;
+		vertexShaderSrc_ = ":NV_vertex_shader.glsl";
+		fragmentShaderSrc_ = ":NV_2_planes_VU_f.glsl";
+		break;
+```
+
+The fragment shader is initialized only once, the first time paintGL() is called.
+
+```
+void ViewFinderGL::paintGL()
+{
+	if (!fragmentShader_)
+		if (!createFragmentShader()) {
+			qWarning() << "[ViewFinderGL]:"
+				   << "create fragment shader failed.";
+		}
+```
+
+The other part of paintGL() does the actual format conversion and rendering, according to the pixel format.
+Take, for example, two-plane YUV formats such as NV12/NV21.
+
+```
+                   Width
++----------------------------------------+
+|                                        |
+|                                        |
+|                                        |
+|                   Y                    |  Height
+|                                        |
+|                                        |
+|                                        |
+|                                        |
++----------------------------------------+
+|                                        |
+|                                        |
+|                 UV/VU                  |  Height / 2
+|                                        |
++----------------------------------------+
+```
+
+*NV12/NV21 YUV frame memory map*
+
+The color format conversion and frame rendering are done by the fragment shader.
+
+```
+#ifdef GL_ES
+precision mediump float;
+#endif
+
+varying vec2 textureOut;
+uniform sampler2D tex_y;
+uniform sampler2D tex_u;
+
+void main(void)
+{
+	vec3 yuv;
+	vec3 rgb;
+	mat3 yuv2rgb_bt601_mat = mat3(
+		vec3(1.164, 1.164, 1.164),
+		vec3(0.000, -0.392, 2.017),
+		vec3(1.596, -0.813, 0.000)
+	);
+
+	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
+	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
+	yuv.z = texture2D(tex_u, textureOut).g - 0.500;
+
+	rgb = yuv2rgb_bt601_mat * yuv;
+	gl_FragColor = vec4(rgb, 1.0);
+}
+```
+
+*The fragment shader for two-plane YUV frames*
+
+```
+void ViewFinderGL::doRender()
+{
+	switch (format_) {
+	case libcamera::formats::NV12:
+	case libcamera::formats::NV21:
+	case libcamera::formats::NV16:
+	case libcamera::formats::NV61:
+	case libcamera::formats::NV24:
+	case libcamera::formats::NV42:
+		/* Activate texture Y */
+		glActiveTexture(GL_TEXTURE0);
+		configureTexture(id_y_);
+		glTexImage2D(GL_TEXTURE_2D,
+			     0,
+			     GL_RED,
+			     size_.width(),
+			     size_.height(),
+			     0,
+			     GL_RED,
+			     GL_UNSIGNED_BYTE,
+			     yuvData_);
+		shaderProgram_.setUniformValue(textureUniformY_, 0);
+
+		/* Activate texture UV/VU */
+		glActiveTexture(GL_TEXTURE1);
+		configureTexture(id_u_);
+		glTexImage2D(GL_TEXTURE_2D,
+			     0,
+			     GL_RG,
+			     size_.width() / horzSubSample_,
+			     size_.height() / vertSubSample_,
+			     0,
+			     GL_RG,
+			     GL_UNSIGNED_BYTE,
+			     (char *)yuvData_ + size_.width() * size_.height());
+		shaderProgram_.setUniformValue(textureUniformU_, 1);
+		break;
+```
+
+The code has already been merged into the libcamera git tree, but if you are interested in the specific patches that implement what we have talked about in this post, please take a look at the following commits:
+
+* [https://git.linuxtv.org/libcamera.git/commit/?id=4a4a3e715b8314c56a2a32788d92fdec464af7b7](https://git.linuxtv.org/libcamera.git/commit/?id=4a4a3e715b8314c56a2a32788d92fdec464af7b7)
+* [https://git.linuxtv.org/libcamera.git/commit/?id=2daa704c968c8aa7a4b209450f228b41e9d42d85](https://git.linuxtv.org/libcamera.git/commit/?id=2daa704c968c8aa7a4b209450f228b41e9d42d85)
+* [https://git.linuxtv.org/libcamera.git/commit/?id=9db6ce0ba499eba53db236558d783a4ff7aa3896](https://git.linuxtv.org/libcamera.git/commit/?id=9db6ce0ba499eba53db236558d783a4ff7aa3896)
+* [https://git.linuxtv.org/libcamera.git/commit/?id=219cbfe76b5a7d9d8206c71aa6115ff8befcff9b](https://git.linuxtv.org/libcamera.git/commit/?id=219cbfe76b5a7d9d8206c71aa6115ff8befcff9b)
+
+## Conclusion
+
+After moving the format conversion to the GPU, the qcam frame rate improved significantly. On the RockPi4B platform the frame rate reached **30.0x** fps with the capture resolution set to **1920x1080**.
+
+### References
+
+1. [https://en.wikipedia.org/wiki/YUV](https://en.wikipedia.org/wiki/YUV)
+2. [https://www.linuxjournal.com/content/image-processing-opengl-and-shaders](https://www.linuxjournal.com/content/image-processing-opengl-and-shaders)
+3. [https://github.com/gjasny/v4l-utils/blob/master/utils/qv4l2/capture-win-gl.cpp#L1547](https://github.com/gjasny/v4l-utils/blob/master/utils/qv4l2/capture-win-gl.cpp#L1547)
+4. [https://learnopengl.com/Getting-started/Hello-Triangle](https://learnopengl.com/Getting-started/Hello-Triangle)
+5. [https://learnopengl.com/Getting-started/Textures](https://learnopengl.com/Getting-started/Textures)
+
+## About the Author
+
+Show is an engineer within the Multimedia Working Group. For further information on this group, click [here](/client-devices/).
diff --git a/src/content/blogs/add-support-to-retrieve-correct-ta-load-address-with-aslr-enabled.mdx b/src/content/blogs/add-support-to-retrieve-correct-ta-load-address-with-aslr-enabled.mdx
new file mode 100644
index 0000000..567c9a0
--- /dev/null
+++ b/src/content/blogs/add-support-to-retrieve-correct-ta-load-address-with-aslr-enabled.mdx
@@ -0,0 +1,26 @@
+---
+title: Add Support to Retrieve TA Load Address w/ ASLR Enabled
+description: This blog details how two interns enabled debugging in Open
+  Portable Trusted Execution Environment (OPTEE) using GDB. Read more here.
+date: 2020-11-26T12:46:01.000Z
+image: linaro-website/images/blog/intern
+tags:
+  - open-source
+author: paolo-valente
+related: []
+
+---
+
+## Proud to Support Internships
+
+Recently two interns worked with Linaro to solve a real engineering problem. We are grateful for the work they undertook, and this blog details their achievements:
+
+Luca and Simone have enabled debugging in the Open Portable Trusted Execution Environment (OPTEE) using GDB. We added support to retrieve the correct load addresses of Trusted Applications (TA) even when ASLR (Address Space Layout Randomization) is enabled. ASLR is a new feature that randomizes memory addresses to improve security against malicious memory accesses; in this context, the load address of a TA is not knowable in advance, requiring a check at execution time to build the GDB symbol table with the actual addresses.
+
+## Contribution
+
+ASLR (Address Space Layout Randomization) in the OP-TEE environment randomizes the memory locations of executing applications to improve security against malicious accesses. This means an application cannot be debugged without taking the randomization into account, because the locations of executing applications are only set at runtime.
+
+So, for GDB to be able to debug an application, it has to retrieve the correct load address of that application. This is exactly what this script does, getting the actual load address from OP-TEE to allow debugging.
+
+This contribution was developed under the supervision of Joakim Bech and Jens Wiklander from Linaro. Patches have been sent individually to the OP-TEE maintainers, who are evaluating and refining them so that they are ready to be included in the OP-TEE project.
diff --git a/src/content/blogs/android-13-now-available-on-qualcomm-reference-boards-rb5-and-rb3.mdx b/src/content/blogs/android-13-now-available-on-qualcomm-reference-boards-rb5-and-rb3.mdx
new file mode 100644
index 0000000..57cea6b
--- /dev/null
+++ b/src/content/blogs/android-13-now-available-on-qualcomm-reference-boards-rb5-and-rb3.mdx
@@ -0,0 +1,43 @@
+---
+title: Android 13 now available on Qualcomm Robotics Reference RB3 and RB5 Platforms
+description: This blog talks about the Android 13 release and how it boots
+  straight out of the box on the Linaro supported Reference Boards RB5 and RB3.
+date: 2022-08-18T05:05:29.000Z
+image: linaro-website/images/blog/Client_Devices_banner_pic
+tags:
+  - android
+author: amit-pundir
+related: []
+
+---
+
+The source code for the latest Android release - Android 13 - is out! The Android 13 release tag (android13.0.0\_r3) and AOSP/master branch boot straight out of the box on both the Linaro supported Qualcomm [Reference Boards in AOSP](https://source.android.com/docs/setup/build/devices) - the Qualcomm Robotics Board RB5 and the Dragonboard 845c (DB845c), also known as RB3.
+ +![An image of a Qualcomm RB5 running Android 13 Easter Egg](/linaro-website/images/blog/qualcomm-rb5-running-android-13-easter-egg) + +This is a significant step forward if we look back in time. A few years ago, on many development boards, it could have taken weeks to get a new Android release to work. Issues would usually need to be fixed and features would not always immediately work. The latest release boots to UI seamlessly, saving developers a lot of bring-up time and hassle. + +# How has this been made possible? + +Over the years Linaro has worked together with Google to constantly keep 96Boards development boards working and in-sync with the upstream Kernel versions and AOSP. Hardware with good software support is essential for testing and validation of the latest AOSP and latest stable and upstream kernels. The collaboration with Google and the upstream community, combined with the upstreaming efforts of Linaro’s Android team and Linaro’s landing team for Qualcomm have brought us to where we are today. And while there is always more work to do, being able to boot the latest Android release on a development board straight out of the box is a great satisfaction! + +A perspective on the history of our efforts with development boards, and why these efforts are essential, is available in one of our previous [Virtual Linaro Connect sessions](https://www.google.com/url?q=https://resources.linaro.org/en/resource/8sjfJfUNX3qitL5MW6Tbfz\&sa=D\&source=docs\&ust=1660846392972003\&usg=AOvVaw3TEWa1FpakA8ohEZoIW_pa). + +# How do I get Android 13 to boot on my Dragonboard? + +We support booting DB845c and RB5 with the same set of AOSP images (db845c-userdebug), thanks to our [previous efforts to support unified boot images](https://www.linaro.org/blog/supporting-multiple-devices-with-the-same-aosp-images/) on these devices. Interested developers can download the db845c-userdebug prebuilt images that we used for smoke testing Android 13 and AOSP from these locations: + +* For android13.0.0\_r3 based images: [https://people.linaro.org/\~amit.pundir/db845c-userdebug-android13/](https://people.linaro.org/~amit.pundir/db845c-userdebug-android13/) +* For aosp-master-android13 images: [https://people.linaro.org/\~sumit.semwal/db845c-userdebug-aosp-master-android13/](https://people.linaro.org/~sumit.semwal/db845c-userdebug-aosp-master-android13/) + +In our limited smoke testing so far, we noticed a WiFi regression which is being fixed [here](https://android-review.googlesource.com/c/device/linaro/dragonboard/+/2188025/), and a Bluetooth regression on the android13.0.0\_r3 tag because of a [missing patch](https://android-review.googlesource.com/c/device/linaro/dragonboard/+/2103025/) from AOSP/master branch, which did not make it to the release tag. + +One can also download AOSP db845c-userdebug prebuilts from our daily build page [https://snapshots.linaro.org/96boards/dragonboard845c/linaro/aosp-master/](https://snapshots.linaro.org/96boards/dragonboard845c/linaro/aosp-master/) + +For advanced users who want to build the AOSP images from source, please follow the instructions from here [https://source.android.com/docs/setup/build/devices](https://source.android.com/docs/setup/build/devices) + +# Want to learn more? + +Join us on Tuesday 6 September for our virtual Linaro and Qualcomm Tech Day to know more about Linaro's Android team efforts to support these reference boards in AOSP - you can register for free[ here](https://www.linaro.org/events/linaro-and-qualcomm-present-qualcomm-tech-day/). 
+ +For more information on the work we do to keep these devices in sync with AOSP, go to our [Software Device Enablement for Android project page](https://linaro.atlassian.net/wiki/spaces/SDEFAU/overview). diff --git a/src/content/blogs/aosp-on-pixel3-pocof1-running-aosp-with-mainline-kernel-on-form-factor-devices.mdx b/src/content/blogs/aosp-on-pixel3-pocof1-running-aosp-with-mainline-kernel-on-form-factor-devices.mdx new file mode 100644 index 0000000..b65f35c --- /dev/null +++ b/src/content/blogs/aosp-on-pixel3-pocof1-running-aosp-with-mainline-kernel-on-form-factor-devices.mdx @@ -0,0 +1,31 @@ +--- +title: AOSP on Pixel3/PocoF1 (Running AOSP with mainline kernel on form-factor + devices) +description: In this article, Amit Pundir takes a detailed look at running AOSP + with mainline kernel on form-factor devices. Read about his findings here! +date: 2020-04-15T10:37:57.000Z +image: linaro-website/images/blog/30921188158_953bca1c9f_k +tags: + - android +author: amit-pundir +related: [] + +--- + +Recently, the Dragonboard 845c (Qualcomm's SDM845 based 96board) [landed in AOSP](/blog/dragonboard-845c-in-aosp/). One of the best things about the Dragonboard 845c (SDM845 SoC to be precise) is that it is actively being worked upon upstream by the Linaro Qualcomm Landing Team and supports an open graphics (mesa/freedreno) stack. Even all but one of the device firmware files are available in the upstream linux-firmware project repository. Having a fully open-source kernel and userland stack makes Dragonboard 845c a very exciting board from AOSP development point of view. What further adds to the excitement around the board is the fact that the SDM845 SoC has been widely shipped in many form-factor devices, making it a great starting point for enabling a fully open Android form-factor device. Having a form-factor device that one can test the latest mainline kernels with the latest AOSP/master changes has long been a desired goal in the Linaro Consumer Group (LCG), and going back a few years we made a similar effort on the [Nexus 7 device](https://bloggingthemonkey.blogspot.com/2016/05/freedreno-not-so-periodic-update.html). Some of the rationale and benefits of this have been covered in previous Linaro Connect talks: [SFO15 401 Mainline on form factor devices / Improving AOSP.](https://www.youtube.com/watch?v=7BVFRIHY7fI) + +The Google Pixel3 phone was the obvious choice for the next form-factor device effort by the Linaro Consumer Group, with an unlocked bootloader and device support already in AOSP. Once we figured out how to work around bootloader checks on Google’s Pixel3 (SDM845 based) phone, we started utilising SDM845 upstream support for running the mainline kernel on the Pixel3. Leveraging the Dragonboard 845c work, we were quickly able to get the device booting from storage, and usb-gadget support working. In addition, we needed support for the LAB/IBB regulators, which provide the power supplies for LCD and AMOLED display panels. These are required to power the panels on SDM845 platforms, but the driver for these is not yet upstream, so we utilized work-in-progress patches from the lists. We then converted the downstream dts-based Pixel3 command mode panel driver to an upstream-style drm panel driver. Soon we hit a wall while enabling the display panel, as it uses Display Stream Compression (DSC), which is not yet supported upstream on Qualcomm hardware, but is actively being worked on. So while the device boots to UI, the screen output is garbled at the moment. 
+
+![class=medium-inline poco-f1-settings](/linaro-website/images/blog/poco-f1-settings-page)
+
+To integrate support into AOSP, we created a “pixel3\_mainline” build target (to differentiate it from the official “blueline” codename used in AOSP), and pushed it along with the Dragonboard 845c support. The goal of this newly added pixel3\_mainline-userdebug build target is to run AOSP on the Pixel3 device with the mainline Linux kernel and an open graphics stack (mesa/freedreno), unlike AOSP's official aosp\_blueline-userdebug build target for Pixel3, which runs the android-4.4 kernel with proprietary closed-source services and binaries. Status as of today is that the pixel3\_mainline-userdebug build boots with garbled output on screen, but is accessible via ADB. We hardcode bootargs in the kernel and force their use, because we can't boot with the bootargs appended by the bootloader during bootup. We also configure the system partition as a super partition and have not yet moved to retrofit dynamic partition support. Currently we only support booting with the Android P bootloader, because Android-10 bootloaders need userspace fastbootd support, which is currently missing in our build. You can find the How To instructions at [https://wiki.linaro.org/AOSP/blueline](https://wiki.linaro.org/AOSP/blueline)
+
+Meanwhile we started looking into Pocophone's F1 phone, a similar Snapdragon 845 based device, which uses a panel that doesn't require DSC support. With a relatively small amount of work to add support for the PocoF1 panel, we quickly got AOSP booting up to UI with the mainline Linux kernel. Since the Dragonboard 845c support was already in AOSP utilizing the Android Generic Kernel Image (GKI), we could just re-use the GKI and the Dragonboard 845c kernel modules along with local (vendor specific) panel and regulator driver modules, demonstrating the future potential of the Android GKI initiative:
+
+
+
+Status as of today is that the PocoF1 AOSP build boots to UI and Bluetooth (HID/Audio) works, but touch-input, WiFi and Audio are still work in progress. On WiFi, we are stuck at needing to allocate a special type of protected shared memory region. Unfortunately, without this special allocation type, PocoF1 just reboots during boot, due to unauthorized access to that shared memory region. There is no plan to submit PocoF1 support to AOSP or provide support in any form; it will stay out of the tree. We use PocoF1 only for development purposes. You can find the How To instructions on [github](https://github.com/pundiramit/device-xiaomi-beryllium/blob/master/README.md).
+
+The upstream story for Pixel3 and PocoF1 isn't much different from the state of the Dragonboard 845c. Other than the Dragonboard 845c pending patchset, we need to upstream working vendor Panel and Regulator drivers. Additionally, we need to push the device-tree files needed to support the phones. The only blocker is the upstreaming of the Qualcomm specific board-id and msm-id device-tree properties, which were NACKed the last time they were submitted: [\[PATCH v2 1/3\] devicetree: bindings: Document qcom,msm-id and qcom,board-id](https://lkml.org/lkml/2015/3/4/1241). These properties are used by MSM bootloaders during boot-up to pass the correct device tree to the kernel. Qualcomm Android devices do not boot if these properties are missing in the device-tree.
+
+A big thank you to Linaro’s Qualcomm Landing Team, Google’s Android Systems Team, along with Rob Clark and other developers on the freedreno effort for helping out in the bringup and resolving issues!
diff --git a/src/content/blogs/arm-transfers-cmsis-pack-technology-to-linaro.mdx b/src/content/blogs/arm-transfers-cmsis-pack-technology-to-linaro.mdx
new file mode 100644
index 0000000..6892363
--- /dev/null
+++ b/src/content/blogs/arm-transfers-cmsis-pack-technology-to-linaro.mdx
@@ -0,0 +1,43 @@
+---
+title: "Arm Transfers CMSIS-Pack Technology to Linaro"
+description: In this article, Francois Ozog looks at the CMSIS-Pack Technology
+  which has been transferred from Arm to Linaro. Read here about the goals of the
+  project.
+date: 2021-06-02T00:13:31.000Z
+image: linaro-website/images/blog/IoT-bg
+tags:
+  - arm
+  - iot-embedded
+author: francois-ozog
+related: []
+
+---
+
+*For a more recent blog post on the Open CMSIS Pack project, please see [Recent developments in the Open-CMSIS-Pack Project](https://www.linaro.org/blog/recent-developments-in-the-open-cmsis-pack-project/).*
+
+The IoT is on the verge of incredible growth – arguably, it has been for years, but what has changed is the intersection in maturity and availability of several key catalysts. These include devices providing more compute capabilities, improved connectivity and increased security threats, as well as the rapid evolution of machine learning.
+
+For the software ecosystem to capitalise on the opportunities for IoT innovation at scale, there is a need to improve the compatibility of software for component re-use, which has long been a challenge in the IoT landscape.
+
+To address the challenges facing software compatibility for IoT and embedded microcontroller devices, Arm is transferring [CMSIS-Pack technology](https://developer.arm.com/tools-and-software/embedded/cmsis/cmsis-packs) to the Linaro IoT and Embedded Group under a new project named Open-CMSIS-Pack. CMSIS-Pack technology already provides device support for close to 9,000 different microcontrollers, making project integration of drivers, middleware and other software components across multiple Arm-based devices much easier.
+
+# Introducing the Open-CMSIS-Pack Project
+
+The Open-CMSIS-Pack project aims to deliver a standard for software component packaging and related foundation tools for validation, distribution, integration, management, and maintenance.
+
+The initial focus of the Open-CMSIS-Pack project is command-line tools and CMake workflows that enable the broader ecosystem to integrate CMSIS-Pack-based development flows. This project is the starting point for evolving the CMSIS-Pack technology into a true open standard for MCU software component packaging, targeting key interfaces for major IoT platforms and producing a framework that can be embraced across the ecosystem.
+
+# Timeline
+
+The Open-CMSIS-Pack project was established in April 2021. Linaro and project members will be working on several releases over the coming months, which will be focussed on the following:
+
+* Create command-line tools for project builds based on software packs
+* Create workflows and utilities for the verification of software packs
+* Extend the pack description format for better usability across the complete workflow
+* Define processes that simplify the creation of software packs from other sources, such as CMake based projects
+* Develop the concept of a software layer that defines a collection of pre-configured software components
+* Organize the taxonomies of standard APIs that are essential for re-usable software stacks
+
+STMicroelectronics, NXP Semiconductors and Arm are the founding members of the Open-CMSIS-Pack project.
We welcome contribution and participation from other organisations. If you would like to support and contribute to the Open-CMSIS-Pack project, please contact contact@linaro.org. + +For further information about the Open-CMSIS-Pack project, visit [open-cmsis-pack.org](https://www.open-cmsis-pack.org/). Alternatively if you want to find out more about Linaro and the work we do, make sure to [get in touch](https://www.linaro.org/contact/)! diff --git a/src/content/blogs/automatic-detection-and-reporting-of-performance-regressions.mdx b/src/content/blogs/automatic-detection-and-reporting-of-performance-regressions.mdx new file mode 100644 index 0000000..a688287 --- /dev/null +++ b/src/content/blogs/automatic-detection-and-reporting-of-performance-regressions.mdx @@ -0,0 +1,35 @@ +--- +title: Automatic detection and reporting of performance regressions +description: In this blog, Paolo Valente and Federico Gelmetti talk about the + progress that has been made on extending Linaro Kernel Functional Test (LKFT) + functionalities to include automatic detection and reporting of performance + regressions. All of this work will ultimately help maintainers spot + regressions and remove them. +date: 2021-11-16T05:09:40.000Z +image: linaro-website/images/blog/tech_background +tags: + - linux-kernel + - testing + - toolchain +related_projects: + - LKQ +author: paolo-valente +related: [] + +--- + +By Paolo Valente and Federico Gelmetti + +In June 2021 we published [a blog](https://www.linaro.org/blog/ensuring-optimal-performance-through-enhanced-kernel-testing/) where we talked about the efforts being made to extend LKFT (Linaro Kernel Functional Test) functionalities to include automatic detection and reporting of performance regressions. + +As stated in that blog, [a patchset was created](https://github.com/Linaro/test-definitions/commit/de4c57c2b8d3d877001b898a601b7753d23d2cfc) for [Linaro test definitions](https://github.com/Linaro/test-definitions/), in order to run [mmtests](https://github.com/gormanm/mmtests) benchmarks in LAVA (Linaro Automation and Validation Architecture), publish the result to SQUAD (Software Quality Dashboard) and then do post processing to find out if we have regressed between the different kernel versions. In addition to this patchset, we are also trying to glue all the pieces together to have a fully working pipeline that can actually notify the people involved if a regression has happened. + +The first step we took to achieve a fully working pipeline was to verify that any arbitrary mmtests benchmark other than sysbenchcpu (which we have running in the pipeline as of today) could be run from test-definitions. We managed to make test-definitions run dbench4 from mmtests benchmarks selection with little tweaks to the existing interface, so we can assume that most tests can be configured with the same amount of effort. + +Result data, now generated in JSON format, were useful for simulating the next step, which was to pull the metric's data from SQUAD via a script that retrieves the data in a JSON format. This allowed us to run the regression detection script on this data by feeding it into squad-report (the tool that is responsible for creating a report with the regressions), which then got sent to the people involved. 
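+
+To make that post-processing step concrete, here is a minimal sketch (in Python) of the kind of check the regression detection performs on the JSON metric data pulled from SQUAD. The field names, file layout and the 5% tolerance are illustrative assumptions, not the actual squad-report implementation:
+
+```
+import json
+
+TOLERANCE = 0.05  # flag changes worse than 5% (illustrative threshold)
+
+def find_regressions(baseline_file, candidate_file):
+    """Compare two JSON dumps of benchmark metrics and list regressions."""
+    with open(baseline_file) as f:
+        baseline = json.load(f)   # e.g. {"dbench4/throughput": 412.3, ...}
+    with open(candidate_file) as f:
+        candidate = json.load(f)
+
+    regressions = []
+    for metric, base_value in baseline.items():
+        new_value = candidate.get(metric)
+        if new_value is None or base_value == 0:
+            continue
+        # Higher is assumed to be better here; a real report tracks the
+        # direction of improvement per metric.
+        change = (new_value - base_value) / base_value
+        if change < -TOLERANCE:
+            regressions.append((metric, base_value, new_value, change))
+    return regressions
+
+if __name__ == "__main__":
+    for metric, old, new, change in find_regressions("baseline.json", "candidate.json"):
+        print(f"REGRESSION {metric}: {old:.2f} -> {new:.2f} ({change:.1%})")
+```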
+ +As it stands, this pipeline is very close to being complete and functional, only missing two major components: on one end, the JSON manipulation and report creation script (for which a [work is in progress](https://gitlab.com/Linaro/lkft/reports/squad-report/-/merge_requests/102) already; on the other end, the creation of a root filesystem with all benchmark pre-installed, which would be ready for use in the LKFT environment. + +With these last pieces, plus some more tweaks and minor fixes, the pipeline will finally be operational. We personally would like to see that in action, as it will help maintainers spot regressions and get rid of them. + +For more information, go to [https://lkft.linaro.org/ ](https://lkft.linaro.org/) diff --git a/src/content/blogs/automotive-hyperscalers-testing-on-arm-and-more-introducing-linaro-virtual-connect-fall-2021.mdx b/src/content/blogs/automotive-hyperscalers-testing-on-arm-and-more-introducing-linaro-virtual-connect-fall-2021.mdx new file mode 100644 index 0000000..8c0c6a6 --- /dev/null +++ b/src/content/blogs/automotive-hyperscalers-testing-on-arm-and-more-introducing-linaro-virtual-connect-fall-2021.mdx @@ -0,0 +1,59 @@ +--- +title: Introducing Linaro Virtual Connect Fall 2021 - Linaro +description: > + Within this article, we announce our schedule for our Linaro Virtual Connect + Fall 2021 which will consist of 70+ technical keynotes & sessions. Read more + here. +date: 2021-08-18T12:57:31.000Z +image: linaro-website/images/blog/48784720458_63040ac998_k +strap_image: /assets/images/content/48784720458_63040ac998_k.jpg +tags: + - linaro-connect +author: connect +related: [] + +--- + +# Introducing Linaro Virtual Connect Fall 2021: Automotive, Hyperscalers, Testing on Arm and more + +Linaro Virtual Connect Fall 2021 (LVC21F) took place online from September 8-10, 2021. Linaro Connect is a technical event that runs twice a year that brings together the Arm Ecosystem to discuss recent achievements and future work needed to enable Arm architecture. + +Linaro Virtual Connect Fall 2021 consisted of 70+ technical keynotes and sessions, spanning a range of topics including Security, OS Build & Test, Automotive, Windows on Arm, IoT and Embedded, and more. We welcomed keynote speakers from Qualcomm, Huawei, Google, Informa Tech which you can read about below. We also hosted a number of panels on topics such as Windows on Arm and SystemReady. For the first time in Linaro’s history we hosted a Virtual Demo Showcase. + +#### Introducing our keynote speakers: + +[From Mobile to Automotive: Delivering Intelligent, Next-Gen Digital Cockpit Solutions](https://events.pinetool.ai/2231/#sessions/67121?referrer%5Bpathname%5D=%2Fsessions\&referrer%5Bsearch%5D=\&referrer%5Btitle%5D=Sessions) + +Vasanth Waran; Senior Director, Product Management, Qualcomm Technologies, Inc. + +[The CodeLinaro Story](https://events.pinetool.ai/2231/#sessions/67122) + +Lisa Lammens; Director, Program Management, Open Source Group at Qualcomm Technologies, Inc. 
+ +[Linaro's Next Decade: Scaling the ARM Ecosystem for 20x Growth](https://events.pinetool.ai/2231/#sessions/67146?referrer%5Bpathname%5D=%2Fsessions\&referrer%5Bsearch%5D=\&referrer%5Btitle%5D=Sessions) + +Bryan Che; Chief Strategy Officer, Huawei + +[Hyperscaler testing platform keynote](https://events.pinetool.ai/2231/#sessions/72699?referrer%5Bpathname%5D=%2Fsessions\&referrer%5Bsearch%5D=\&referrer%5Btitle%5D=Sessions) + +David Munday, Google + +[Innovation and disruption of software and hardware architecture in automotive Wards Intelligence ](https://events.pinetool.ai/2231/#sessions/67170?referrer%5Bpathname%5D=%2Fsessions\&referrer%5Bsearch%5D=\&referrer%5Btitle%5D=Sessions) + +Luca De Ambroggi, Chief Analyst, Informa Tech Automotive Group + +#### Featured panels: + +[SystemReady Panel ](https://events.pinetool.ai/2231/#sessions/67180?referrer%5Bpathname%5D=%2Fsessions\&referrer%5Bsearch%5D=\&referrer%5Btitle%5D=Sessions) + +Panelists: Masami Hiramatsu, Tech Lead at Linaro;  Samer El-Haj-Mahmoud, System Architect at Arm; Peng Fan, Senior software Engineer at NXP; Marcin Wojtas, Head of Engineering at Semihalf; Paul Liu, Linaro Senior Software Engineer at Linaro; and more. + +**Virtual Demo Showcase** + +We hosted our first ever Virtual Demo Technology Showcase on Thursday, September 9. If you’ve ever been to a Linaro Connect in the past, you may remember our Demo Friday Technology showcase. Participants of Demo Friday prepared a technology demo on a wide variety of Arm Software topics including— OS Build & Test, Edge Computing and its many use cases, Linaro and community enablement including Open source development, Native software development, etc. + +**Make sure to register to start building your schedule!** + +Registration was open and it is free to attend. Two weeks before Linaro Connect, the event platform opened to all registered attendees. + +All resources relating to this Linaro Connect event can be accessed [here](https://resources.linaro.org/en/tags/f4f57bc3-0bc0-4229-9cd0-0160f803f36c). For more information about Linaro and the work we do, [get intouch](https://www.linaro.org/contact/)! diff --git a/src/content/blogs/bfq-saved-me-from-thrashing.mdx b/src/content/blogs/bfq-saved-me-from-thrashing.mdx new file mode 100644 index 0000000..4cad331 --- /dev/null +++ b/src/content/blogs/bfq-saved-me-from-thrashing.mdx @@ -0,0 +1,36 @@ +--- +title: BFQ saved me from thrashing +description: In this article Linus Walleij looks at what causes thrashing and + developments that help to mitigate this problem. +date: 2020-09-02T09:13:04.000Z +image: linaro-website/images/blog/technology-3389917_1920-1- +tags: + - linux-kernel +related_projects: + - LSE +author: linus-walleij +related: [] + +--- + +## Benefits of the BFQ I/O scheduler + +Recently my less-used desktop computer became sluggish, and would randomly crash. It seemed to be fully occupied with disk activity and quickly became uninteractive to the point that not even ssh login would work. This is easily identified as [thrashing](https://en.wikipedia.org/wiki/Thrashing_\(computer_science\)): constantly swapping to disk because of short core memory. + +When Linux runs out of memory, processes will of course be killed by the [OOM](https://en.wikipedia.org/wiki/Out_of_memory) (out of memory) killer, but if you have ample swap space, instead you will get thrashing. In this case the OOM killer would have been better: the system was so uninteractive that there is no point in trying to use swap. 
This was on a flash drive but still would just thrash. + +Normally you would interact with the machine through the UI or a terminal to shut down some processes, but it would not work: the memory used by the interactive processes like the desktop itself or even an SSH terminal was subject to swap! + +After an update to the latest Fedora distribution the thrashing was the same but with one difference: the UI did not become completely uninteractive, making it possible to close down e.g. the web browser and recover the system. + +The thrashing was caused by one of the DIMMs in the computer starting to malfunction reducing the core memory to a mere 4GB. (I have since replaced the memory.) + +Something happened in Fedora that made it cope better with thrashing. + +The most likely improvement in [Fedora is BFQ the Budget Fair Queue block scheduler](https://www.youtube.com/watch?v=l7j1AqTZKG4), that will use heuristics to keep the interactive processes higher in priority. This was recently made default for single queue devices in Fedora using a udev ruleset. + +My flash drive was a single queue elder device – no fancy NVME – so it would become the bottleneck while constantly swapping, but with BFQ in between the interactive processes got a priority boost and the system remains interactive under this heavy stress, and swapping is again a better alternative to the OOM killer. + +Having worked a bit with BFQ over the years this is a welcome surprise: the user-perceived stability of the system is better. + +This might also illustrate the rule to make swap space around double the physical memory: now that my swap space suddenly became 4 times the physical memory the OOM killer would never step in, if it was just 2 times the physical memory, maybe it would. (I do not know if this holds or if the thrashing would be the same.) diff --git a/src/content/blogs/budget-fair-queueing-bfq-linux-io-scheduler-optimizations-for-multi-actuator-sata-hard-drives.mdx b/src/content/blogs/budget-fair-queueing-bfq-linux-io-scheduler-optimizations-for-multi-actuator-sata-hard-drives.mdx new file mode 100644 index 0000000..33feb87 --- /dev/null +++ b/src/content/blogs/budget-fair-queueing-bfq-linux-io-scheduler-optimizations-for-multi-actuator-sata-hard-drives.mdx @@ -0,0 +1,84 @@ +--- +title: BFQ Linux IO Scheduler Optimizations +description: In this blog, we cover the extra logic applied in BFQ I/O scheduler + to support multi-actuator drives. Paving the way to exploiting potential + multi-actuator drives. +date: 2021-12-02T06:49:56.000Z +image: linaro-website/images/blog/road-timelapse +tags: + - linux-kernel +related_projects: + - PERF +author: paolo-valente +related: [] + +--- + +Computer operating systems use Input/output (I/O) scheduling to determine in which order operations should take place. BFQ is a proportional-share I/O scheduler which associates each process with a weight. Based on the weight, it then decides how much of the I/O bandwidth to allocate to a process. + +In this article, Linaro Interns Gabriele Felici and Davide Zini (followed by Paolo Valente) talk about the extra logic they have implemented in the BFQ I/O scheduler, to support multi-actuator drives. + +## The challenge of managing the load balance among actuators + +New recording technology is driving HDD capacity to 60TB+ per spindle. Yet servo-mechanical capability does not increase with areal density. As a consequence, the speed of high-capacity drives is becoming too low for reading/writing all the data that the drive can store. 
More formally, drives suffer more and more from low Input/Output Operations Per Second (IOPS) per TB. Therefore, reaching higher IOPS becomes increasingly important as drive capacities grow. + +Multi-actuator drives are an effective response to this need. They appear as a single device to the I/O subsystem, yet internally they address commands to different actuators, as a function of the Logical Block Address (LBA). A given sector is reachable by only one actuator - none of the address space is shared. + +For example, Seagate’s Serial Advanced Technology Attachment (SATA) version contains two actuators and maps the lower half of the SATA LBA space to the lower actuator and the upper half to the upper. There are no changes to the I/O protocol, except for a log page to report the LBA-actuator mapping. + +Yet, this new architecture poses the following important challenge: information on the destination actuator of each command must be used cleverly by the I/O subsystem. Otherwise the system has little or no control over the load balance among actuators; some actuators may be underutilized or remain totally idle. Seagate asked Linaro to address this important issue, first and foremost for their drives, but in general for any multi-actuator drive. In particular, they asked for an open-source solution within the Linux kernel. + +## The solution: Enriching the BFQ I/O scheduler with extra logic + +I/O schedulers are the ideal kernel components for tackling this problem, as their role is to decide the order in which to dispatch commands. In this respect, Budget Fair Queueing (BFQ) is the most feature-rich and accurate I/O scheduler in Linux. It provides strong service guarantees on bandwidth and latency. In addition, BFQ has a rich infrastructure, which allows for accurate control over I/O. This makes BFQ a good basis for adding extra logic that also controls per-actuator load. + +In collaboration with Seagate Technology, we have enriched the BFQ I/O scheduler with such extra logic. The resulting extended version of BFQ provides dramatic performance improvements over a wide range of workloads. At the same time, it preserves the original bandwidth and latency guarantees of BFQ. As a more general contribution, the concepts and strategies used in BFQ show effective ways to take advantage of the IOPS gains of multi-actuator drives. + +### Adding initial support for multi-actuator drives inside BFQ + +by Gabriele Felici + +If a standard I/O scheduler, including an unimproved BFQ scheduler, is used over a multi-actuator drive, some actuators may remain idle while other actuators take care of all the requests. This wastes performance, considering the potential the drive has. What we need is a mechanism to control each actuator’s load. + +Since BFQ keeps a queue of I/O requests for each process, we started with the following simple but powerful idea: we split each per-process queue into one queue per actuator, to guarantee that each actuator is taken care of by the scheduler. + +### Boosting performance using injection + +by Davide Zini + +BFQ dispatches to the drive as many I/O requests as the I/O subsystem deems appropriate. In particular, new I/O may be dispatched even while there is already some other I/O in service in the drive. So the drive’s internal parallelism or pipelining can be fully exploited; notably, actuators can run in parallel. + +Yet, even after the above split, there are situations where one or more actuators are underutilized. 
A first simple case is with just two queues, Q1 and Q2, for a dual-actuator drive. Q1 and Q2 only contain requests for, respectively, the lower and the upper actuator. BFQ serves one queue at a time, for a while. If we represent requests for the lower/upper actuator as blue/red rectangles, then the service is as depicted in next figure: + +![Boosting performance using injection image 1](/linaro-website/images/blog/boosting-performance-using-injection-image-1) + +While BFQ serves only Q1 , the upper actuator gets idle, and vice versa. + +To describe a much worse situation, consider now a scenario where several queues, all with the same weight, have pending I/O. In this case, BFQ schedules queues in such a way that each queue gets the same number of I/O per second served, on average. Trouble occurs if many of these queues have I/O for some lucky actuators, while only few for some unlucky actuators. Unlucky actuators get a low number of I/O per second, and tend to be idle or little utilized most of the time. + +To address this underutilization, we added a new scheduling action. While serving a queue that contains I/O for a given actuator, inject (dispatch) some I/O requests for other actuators, if the latter are underutilized. + +In this respect, BFQ already has an injection mechanism, to boost throughput in the presence of synchronous I/O. While a synchronous queue is temporarily idle (but is in service), BFQ may inject extra I/O taken from other non-in-service queues. + +The idea is to extend this feature to an additional system state. The new state is: there are requests for one actuator in the drive queue, but there is no request, or too few requests for the other actuators. So, the idle state now does not concern a queue (which happens to be empty), but one or more actuators (which happen to have no request, or too few requests to serve). + +In this case, BFQ may choose not to dispatch the next-to-serve request of the in-service queue, but to instead inject a request of another non-in-service queue, if this allows the (too) idle actuator to be fed. For example, given the simple scenario above with just two queues, the service scheme becomes as follows: + +![Boosting performance using injection image 2](/linaro-website/images/blog/boosting-performance-using-injection-image-2) + +That is, while Q1 is being served, some requests from Q2 are however injected, if the upper actuator is too idle, and vice versa. + +A last, important piece of information for implementing this mechanism is when we consider an actuator underutilized. To this goal, we defined a load threshold: if the number of I/O requests queued (inside the drive) for a given actuator is below this threshold, then we deem that actuator underutilized. Injection occurs if one actuator is below this threshold. Empirical and technical information hinted at four as an optimal threshold, for keeping all actuators busy enough. + +The following plot shows an example of the throughput boost provided by this mechanism, on a Seagate Exos 2X14 dual-actuator drive, and for a workload made of two parallel flows of small, sequential reads. BFQ with injection, outperforms all other I/O schedulers. + +![Example of throughput boost provided by injection mechanism](/linaro-website/images/blog/example-of-throughput-boost-provided-by-injection-mechanism) + +Rather importantly, BFQ’s performance is stable across workloads, while that of the other schedulers is essentially a matter of luck. 
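+
+To make the threshold mechanism described above concrete, here is a simplified sketch in C of the injection decision. It is purely illustrative: the names, types and structure are invented for this post and this is not the actual BFQ code, but it captures the rule that I/O for another actuator is injected whenever that actuator has fewer requests queued in the drive than the threshold of four.
+
+```
+#include <stdbool.h>
+
+/* Illustrative only, not the actual BFQ implementation. */
+#define INJECTION_THRESHOLD 4 /* requests queued in the drive, per actuator */
+
+/*
+ * Decide whether to inject a request for another actuator instead of
+ * dispatching the next request of the in-service queue.
+ *
+ * inflight[i] is the number of requests currently queued in the drive for
+ * actuator i; in_service is the actuator targeted by the in-service queue.
+ */
+static bool should_inject(const unsigned int *inflight,
+                          unsigned int nr_actuators,
+                          unsigned int in_service)
+{
+    for (unsigned int i = 0; i < nr_actuators; i++) {
+        if (i != in_service && inflight[i] < INJECTION_THRESHOLD)
+            return true; /* this actuator is underutilized: feed it */
+    }
+    return false;
+}
+```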
+ +## Conclusion + +The above contributions pave the way to fully exploiting the potential of multi-actuator drives. +Yet these are still preliminary contributions. Our current results only cover a few (yet relevant) workloads. Another issue is what is the best choice of value for the injection threshold? It most certainly depends on the workload so the best choice may be dynamic depending on the workload. Yet a good, static value such as currently provided could provide acceptable performance. + +For more information, have a look at the last two presentations on this topic: at [Storage Developer Conference 2021](https://www.snia.org/educational-library/bfq-linux-io-scheduler-optimizations-multi-actuator-sata-hard-drives-2021), and at [Linaro Virtual Connect 2021](https://resources.linaro.org/en/resource/9xXCrNtX3WNTQr3nAtzNuk). diff --git a/src/content/blogs/can-we-make-ai-super.mdx b/src/content/blogs/can-we-make-ai-super.mdx new file mode 100644 index 0000000..83e3af3 --- /dev/null +++ b/src/content/blogs/can-we-make-ai-super.mdx @@ -0,0 +1,83 @@ +--- +title: Can we make AI Super? +date: 2019-11-20T10:24:06.000Z +image: linaro-website/images/blog/abstract1 +tags: + - ai-ml + - hpc +author: paul-isaacs +related: [] + +--- + +Linaro works with hardware vendors and software developers to help coordinate and build the toolkits for improved calculation libraries. We work to defragment the market by supporting ONNX, TFLite and TVM to translate to Arm NN supported inferencing optimised hardware. Linaro's HPC group aims to assist in optimising libraries and infrastructure dependencies that distribute the calculation requirements across servers, clusters, HPC nodes and supercomputers. But beyond Machine Learning and inferencing, where is the full scope of the truly cognitive AI? In this blog, Linaro's HPC Tech Lead Paul Isaac's talks about the history of AI and future opportunities made possible through super computing. + +## **Can we make AI super?** + +Over several millennia, human creativity has generated more than a thousand deities and doctrines, for which individuals and groups have chosen (or born into), to align with. The expectation that having faith will guide and help solve all problems. Are we looking towards AI to be the ultimate problem solver? + +Technology is a modern term but applies equally to historic and current complex systems designed to aid physical or mental tasks. Technology, orders of magnitude greater than a culture has previously known, can appear to be some kind of magic. Depending on how the technology is wielded it can be a source of destruction or benefit. The complexities of faith in humanity can not easily be resolved to a binary-style paradigm upon which technology is based on for decision making. There are nuances, contexts, hyperbole, lies and buried within, truths. + +Artificial Intelligence is a term first coined in the last 100 years to aspire to describe the potential of electromechanical technology to perform functions such that the outcome is an equivalence of human actions and thought processes. A system which can assist in the diagnosis of many ailments would appear as wizardry if introduced to tribes cut-off from the modern world. + +If we are to believe the current hyperbole and inflationary ideas that media, in its many forms, including fictional Hollywood block-busters present to us, many might think Artificial Intelligence is on the brink of being solved… and taking over the world to establish itself as our overlord! 
+ +Linaro’s High Performance Computing Special Interest Group (HPC SIG) is nothing like an AI Overlord. We enable and validate the testing of HPC-related toolchains and libraries, through continuous integration (CI) processes, to ensure the end-user/researcher can perform their number crunching activities. Scenarios such as particle physics, planet formation, universe mapping and Machine Learning require enormous amounts of computational power that HPC and SuperComputer infrastructures enable. We do however, have a special interest in seeing where Artificial Intelligence development may lead us and whether the current path is the right one. + +## Are we there yet? + +Alan Turing in 1937 described how any problem having a logical solution can be reduced to a solution based upon a small set of simple instructions. + +Claude Shannon in 1938 proved that Boolean Algebra, which was developed in the 1840’s & 50s, could help with practical problems of circuit design. + +John von Neumann in 1945, defined the architecture of modern digital computers, which described the ability to store both data and program within the same memory system. He specifically differentiated the computation component from the storage component, or as we know them, CPU and RAM. + +We have enjoyed over 70 years of this style of computation and we have achieved so much. Now we come to the crux of a problem that von Neumann set in motion. In von Neumann’s June 30, 1945 “First draft of a report on EDVAC”, he included an analogy between digital computing elements and biology’s neurons and synapses. von Neumann, based on MacCulloch & Pitts research, along with humanity in general, preferred to keep things simple. They did not consider the more ‘complicated’ aspects of how neurons function and chose instead to portray neurons as having minimal characteristics. + +So, we have Turing solving all problems that have logical solutions; von Neumann simplifying neuron operation such that memory and computation are kept distinctly separate whilst controlled by synchronised events; and we have Shannon implementing boolean algebra to design circuits. + +These are the foundations from which researchers have been developing artificial intelligence, with the expectations and hope of achieving strong, or generalised AI - that which is expected to equal or better incalculable rational/irrational logical/illogical and emotional human intelligence. Of course, we can have long discussions about the nuances of what defines intelligence. Is Google’s search engine intelligent? Or IBM’s Watson an expert because it won at Jeopardy? Does winning at a board game with fixed rules infer intellect? + +Large high performance computing platforms, that consume kilowatts or even megawatts of power have not yet achieved the full dynamic and adaptable range of contexts and responses to situations with what we do in less than 20 watts - the biological power consumption of our brain. Perhaps now it starts to dawn on us that the over simplifications of the past have led us down a flawed route. But, all is not lost. + +## What have we achieved? + +Computationally, we now carry smartphones that have more processing power than systems that placed man on the moon. An often touted example. The same phones also carry out facial recognition, image rendering, voice recognition, are the edge connection to social media and occasionally are used to make telephone calls! 
Many of these activities have benefitted from research into artificial intelligence and artificial neural networks. + +We must be wary of naming something with its final goal in mind during its early first steps. Artificial neural networks (ANN) groups many disparate algorithms and circuits under a banner suggesting they already imitate the intricacies of neural pathways. Neuroscientists are still discovering new attributes of neurons and how they might be interleaved and connected within the brain. Biology is never complete, especially when considering evolutionary activities. + +We should ask, what level of abstraction from known neural activity is being imitated by the respective ANN? Is the ANN only an abstraction, assimilation, or parallels a specific sub-function? + +We become wary of over-used terminology. How many times can a new ANN be introduced as the latest and greatest if its step change over the predecessor is minute? This is an example for the overarching title Artificial Intelligence too. We do not want a third ‘Winter of AI’ (The Winters of ‘70s-80s and late 80s/early 90s occurred due to over-hype/under-delivery and subsequent failure to secure significant further research funding). + +Machine Learning, a subset of Artificial Intelligence, computes models that can be used to create probability matches when comparing something known with something new. However, the something new might only vary in a nuanced way and yet not fit a model at all. This is where the number and variety of example entries in a training dataset can help build a model that has a higher accuracy level than another. + +For example, a model built from a training set of cat images that happens to cover the majority of domestic varieties might fail if presented with a Manx cat (tail-less). In this instance the model would have to be scrapped and rebuilt from scratch with a new set including sufficient variations of tailless cats for the model to be improved. Processing sets of hundreds of thousands or millions of images to build a model is not the timely arena for low-power small form-factor devices. However, once the model has been built, deployment for inferencing can suit such devices. + +The Internet of Things (IoT) introduces the ability to perform AI inferencing at the edge of data networks. That is, AI is the overall topic, but inferencing is at a most basic level of comparing inputs to pre-calculated models, pattern-matching. Arm’s Ethos NPU series is specifically designed for throughput and performance efficiency in this area. Using the ArmNN SDK mobile app developers can embed a minimal codebase to enable inferencing solutions. + +Having alluded to the need for comprehensive computing functions to be available in building new models, this points to having server-capable hardware. Systems that can run for days/months uninterrupted to carry out pattern identification. Searching through the possible permutations to align particular features to mathematically described splines/planes/hyperplanes requires bulk transformation operations suited to silicon designed for the task. Previously, the CPU has been the computational workhorse. But, due to the SIMD nature of the transformations, this operation fits well with GPUs, where rotating an image correlates directly to spline/plane manipulation. + +GPUs provide more functions than purely SIMD operation and consume significant power. Whilst GPUs are used as the mainstay for Machine Learning there has been a rise in dedicated transformation hardware. 
ML accelerators primarily focus on the multiply-accumulate cycle of the calculation. Approaches differ in whether the data must first be transferred to the accelerator or whether the calculation can be carried out in situ, in memory. + +Clustering servers together extends server-based ML model creation to distribute the significant number of calculations required amongst multiple nodes of the cluster. Each of those nodes may be assisted by hardware accelerators. Multiple models can be created simultaneously by balancing the computation resources for each model across available nodes. This works well when the calculations can be segmented into sub-groups without dependencies. + +High Performance Computing (HPC) further extends the cluster environment to distribute the significant number of calculations required amongst multiple nodes, which may number in the tens to hundreds. Each of those nodes may be assisted by hardware accelerators. HPC also enables multiple models to be created simultaneously by balancing the computation resources for each model across available nodes. Due to the RDMA capability in an HPC system, cross-dependency calculations can be carried out and therefore larger and more complex models can be processed. + +The performance of supercomputers is on the cusp of reaching 10<sup>18</sup> operations per second, which helps significantly when searching for patterns amongst massive datasets. Supercomputers increase the scale of operational capability compared to HPC, often having thousands of CPU cores to perform calculations. + +Arm CPU-based solutions currently being introduced combine aspects of the processing capability of dedicated GPUs with the generalised compute functions in the same silicon. Whilst System-on-Chip is not a new concept, directly integrating the highly parallelised SIMD calculation functions within the CPU’s instruction set aims to significantly boost performance in mathematical bulk transformation operations. + +Announcements from Fujitsu of new customers for systems based on their A64FX processor with the Scalable Vector Extension (SVE), and Intel’s next generation of Xeon CPUs which embed their ‘Nervana’ technology, bring about the next step in CPU development. Dedicated Machine Learning accelerators from the likes of Habana, Graphcore and Google all build towards more intensive Machine Learning, and perhaps AI. + +## Are we on the verge of a Super AI? + +We have concentrated on highlighting that simply having the tag of AI does not mean that the functionality being delivered is the entire scope of AI. In fact, as is the case with the vast majority of ‘AI’ tagged activities, the focus is on a subset: Machine Learning model creation and then inferencing using the built models. + +## The Future, TBD! + +Human ingenuity continues to push the boundaries of our physical world and now builds virtual worlds that can be used to explore beyond the constraints of known physics. We have created so much physically and ethereally. Will we soon mimic our neural pathways to create an AI which we might call ‘super’, a Super AI? Probably, but not through Machine Learning alone. New computing architectures and approaches are demanded in areas such as Oscillatory networks, Reservoir computing, Generative Adversarial Networks, Spiking Neural Networks, Neuromorphic computing, Autonomic Asynchronous Recursive Neuromorphic Networks. 
+ +Linaro's HPC Group will be working in this space to ensure those new methods have a stable high performing computational environment from which to explore the art of the possible. Afterall, calculating a brain that has 86 billion neurons and countless more synaptic connections that from somewhere within, consciousness emerges… + +For more information on Linaro’s HPC Group, current and upcoming activities, [check out this presentation](https://www.youtube.com/watch?v=xhzlV91l-zU) I recently gave at the Arm HPC User Group, an event co-located with SC19. diff --git a/src/content/blogs/challenges-of-stabilising-power-and-performance-results-in-a-board-farm.mdx b/src/content/blogs/challenges-of-stabilising-power-and-performance-results-in-a-board-farm.mdx new file mode 100644 index 0000000..c28fbfb --- /dev/null +++ b/src/content/blogs/challenges-of-stabilising-power-and-performance-results-in-a-board-farm.mdx @@ -0,0 +1,234 @@ +--- +title: Challenges of Stabilising Power and Performance Results in a Board Farm +author: lisa-nguyen +date: 2019-03-20T09:00:00.000Z +description: In Linaro, the Power Management Working Group (PMWG) manages a + board farm to boot Linux and Android kernels and run tests across various + boards. +tags: + - arm + - open-source +image: linaro-website/images/blog/collect-power-measurements-in-ci +related: [] + +--- + +In Linaro, the Power Management Working Group (PMWG) manages a board farm to boot Linux and Android kernels and run tests across various boards. Some of our board farm’s objectives include: + +* To collect power and performance results for each board type +* Monitor any regressions +* Share hardware resources within the PMWG team + +## Background + +Before we describe the challenges of stabilising power and performance results, let’s dive into some background information first. + +We were trying to standardise the way we run power and performance tests as a team. Every developer has their own testing methods and ways of analysing data. However, not everyone uses the same metrics to confirm whether their improvements to the Linux kernel are effective or not. One developer may use standard deviation. Another developer may use a particular number as a baseline. We also needed consistent test runs from the same environment and to be able to reproduce bugs easily. + +The idea of the PMWG board farm was born. Because this was going to be a collaborative effort, we requested assistance from other Linaro teams to setup CI loops, build a board farm, add features needed in our open source tools, and create a reporting dashboard. + +Our CI process is illustrated in the flowchart that we created for our farm demo at Linaro Connect Vancouver 2018 below. + +#### PMWG CI Flowchart + +![PMWG CI Flowchart](/linaro-website/images/blog/collect-power-measurements-in-ci) + +Each developer in PMWG has their own branch and we use automerge to merge changes to an integration tree automatically. When an update is detected, we trigger a build and start the process of creating and submitting a CI job. + +The Linaro LAVA Lab team also provides us with daily and weekly health reports of our instance. These reports provide a breakdown of the total number of jobs run, how many completed and how many errored. It then breaks down the errors on a per device type, per error type and device instance basis. The top level error categories are: + +#### Configuration + +Something is wrong with the pipeline job definition. E.g. 
required commands not being specified (this is a new category as of LAVA 2019.01) + +#### Job + +Some required action in the job failed + +#### Test + +Some issue in the test definition resulted in a failure + +#### Infrastructure + +Something went wrong with the Lab infrastructure, e.g. serial connection lost, a call to some control script failed + +#### Bug + +The job has triggered a problem in LAVA which needs to be reported to the LAVA team for investigation + +#### Canceled + +The job was canceled by a user + +The aim is to keep the infrastructure errors to less than 1%. In some CI instances, an infrastructure error triggers an automatic resubmission. This is under control of the QI team. + +Here is a snippet of a lab health report: + +``` +Total jobs:     136 + +        Total errors:   11 (8.09%) + +        LAVA errors:    0 (0.00%) + +        Test errors:    7 (5.15%) + +        Job errors:     4 (2.94%) + +        Infra errors:   0 (0.00%) + +        Canceled jobs:  0 (0.00%) + +Device type:    hi960-hikey + +Total jobs:     107 + +Total errors:   5 (4.67%) + +        Error type:     Job + +        Error count:    2 (1.87%) + +                Error: auto-login action timed out + +                        Count: 1 (0.93%) + +                        IDs: + +                        hi960-hikey-02: + +                                15316 + +                Error: git-repo-action timed out + +                        Count: 1 (0.93%) + +                        IDs: + +                        hi960-hikey-02: + +                                15308 + +        Error type:     Test + +        Error count:    3 (2.80%) + +                Error: Device NOT found! + +                        Count: 3 (2.80%) + +                        IDs: + +                        hi960-hikey-01: + +                                15254 + +                        hi960-hikey-02: + +                                15218 15219 + +Device type:    juno-r2 + +Total jobs:     7 + +Total errors:   0 (0.00%) + +``` + +The daily reports are run starting at 07:00 UTC, and the weekly reports run on Wednesday starting at 06:00 UTC. The utility emails out to a list specified to the script. On PMWG this is currently Vincent, Lisa and the Lab team. + +## Challenges + +### Infrastructure and Integration + +The first challenge we had was to set up the test infrastructure before we could collect any measurements. + +We asked the Linaro LAVA team to enable and integrate Arm energy probe (AEP) support in LAVA. The lack of hardware made it difficult for the LAVA team to fulfill this request in the beginning. We showed the LAVA team how the arm-probe command-line tool works to make the AEP integration process easier. Jointly, we wrote test definitions to detect the AEP and run an AEP command to collect data in LAVA. + +We also spent months soldering hikey and hikey960 boards in order to connect the AEPs, and shipping them to the Linaro labs in Cambridge, UK for racking. This is already challenging for developers who want to collect power measurements locally because most development boards do not provide easy access to the power domain and often need rework. + +### Stability and Reliability + +Once we had the test infrastructure in place, we focused on stabilising our power and performance results. We made some hardware and software adjustments along the way. + +#### Hardware Adjustments + +We noticed static interference between boards. 
When our hikey and hikey960 jobs ran simultaneously, the hikey960 test results were affected by the disturbance caused by our hikeys. It became important to isolate these boards to minimise the chances of the static interference from reoccurring. + +As a first step to solve this problem, we reorganised the board farm. Our board farm was moved to a more temperature controlled area of the lab with more space. + +Before the reorganisation, we noticed the idle power consumption was rather high. + +In this overview chart of idle power consumption below, the left portion shows high results before the reorganisation and then the right portion shows the improved results afterwards. + +#### Idle power consumption results before and after the reorg + +![Idle power consumption results before and after the reorg](/linaro-website/images/blog/idle-power-consumption-1) + +The next two charts show a closer view of before and after. + +#### Closeup of idle power consumption results before the reorg + +![Closeup of idle power consumption results before the reorg](/linaro-website/images/blog/idle-power-consumption-2) + +#### Close up of idle power consumption results after the reorg + +![Close up of idle power consumption results after the reorg](/linaro-website/images/blog/idle-power-consumption-3) + +Before, we collected 328 measurements with a standard deviation of 2.15% to be compared with a probe precision of 2.12%. After, we collected 336 measurements with a standard deviation of 1.38% to be compared with a probe precision of 2.14%. + +Surprisingly fan placement affected our results as well. We noticed a difference between placing the fan on top of the board versus on the side. Moving the fan also reduced vibrations that our lab team noticed. + +The first hikey960 board had the correct fan placement on the side but the second one did not, so we saw a big variation in our measurement on the left side of the chart below. While our jankbench results were steady, our boards tended to overheat when we ran vellamo, a more intensive workload that triggered thermal mitigation. + +### Fan placement affected our data. Vellamo results in red. Jankbench results in blue. + +![Fan placement affected our data. Vellamo results in red. Jankbench results in blue](/linaro-website/images/blog/fan-placement) + +No two boards generated the same or similar results, which led to significant power consumption offsets. For example, our hikey960 boards behaved differently. There was a huge power consumption offset of 15-20% difference between our hikey960 boards before we started troubleshooting with our lab team. + +* Were the power supplies identical? +* Did we need to swap cables? +* Did they have the same fan size? +* What would happen if we removed sdcards? +* Were the probes connected properly? + +The troubleshooting did not end there. We removed the daughter board, used a USB to serial cable for the console, checked firmware versions, and more. With those changes, we reduced the power offset difference to 5-10%. We reached a close error margin of 1% between our two hikey960 boards recently with more accurate AEP calibration. + +#### Software Adjustments + +To solve our overheating issues, we increased the cooling times by running eight iterations of 15 second idle workloads in between tests, totaling two minutes in cooldown time. In the initial test results, we noticed that the multimedia workloads “consumed” less power than the idle workloads, which did not make sense. + +We kept our tools as current as possible. 
We upgraded LAVA from v1 to v2 and learned how to rewrite jobs in YAML instead of JSON. We also moved to Workload Automation v3, taking advantage of the newer energy measurement instrument. Although we acknowledge that the latest version of tools can generate regressions occasionally. + +Originally we had one large CI job that took four hours to complete on average. Then we decided to split the CI job into two smaller ones: one for multimedia use cases (audio and video), and the other to run vellamo only. Having a dedicated vellamo CI job would be less likely to impact other tests like idle. We also cut the amount of time to run our tests in half by running smaller CI jobs. + +We started tracking performance trends for kernel versions 4.9, 4.14, and 4.19. 4.9 is our reference for this comparison chart below. + +### Tracking performance trends for kernel versions 4.9, 4.14, and 4.19 + +![Tracking performance trends for kernel versions 4.9, 4.14, and 4.19](/linaro-website/images/blog/tracking-performance-trends) + +## Recommendations + +Based on our experiences with our board farm so far, we would make the following recommendations: + +1. Check physical connections frequently. Small movements such as rearranging boards in a rack may disconnect the probe on accident, or as we mentioned before, fan placement can greatly affect power and performance results. +2. Keep tools and firmware versions up to date. +3. Run stable, well-known images regularly to help detect any hardware or infrastructure regressions. +4. Add multiple iterations of idle workloads for the board to cool down between runs. +5. If there is more than one of the same board type (in our case, multiple hikey and hikey960 boards), check that they have the same components. Use the same power supply, sdcards from the same batch, same fans, and more. It will make troubleshooting easier. +6. Check that the AEP config file reflects the physical board setup with the correct channel names, numbers, and values. We cannot assume that one hikey960 AEP config will be identical to another one. +7. While isolating the boards may not be necessary, it helped to stabilise our results. +8. Calibrate the arm energy probes often. To do that, we use the arm-probe command-line tool and run this command with the autozero option: + +``` +arm-probe --config </path/to/config> -z +``` + +## Future + +With our success in collecting power and performance results for Android, we hope to do the same for the Linux kernel. We also want to test patches from the Linux/Arm kernel mailing list to find any regressions and report back to the developers. Lastly, we hope to collaborate with kernelci to share resources and provide more useful results to kernel maintainers and contributors other than boot test reports. + +For more information on the PMWG board farm, visit [/kernel-and-toolchain/](/core-technologies/toolchain/). diff --git a/src/content/blogs/debugging-and-profiling-hpc-applications-while-working-remotely.mdx b/src/content/blogs/debugging-and-profiling-hpc-applications-while-working-remotely.mdx new file mode 100644 index 0000000..b7d8f47 --- /dev/null +++ b/src/content/blogs/debugging-and-profiling-hpc-applications-while-working-remotely.mdx @@ -0,0 +1,126 @@ +--- +title: Debugging and Profiling HPC Applications while Working Remotely +description: In this blog we talk about how to debug and profile HPC + applications while working remotely. Read more here! 
+date: 2020-07-20T08:29:41.000Z +image: linaro-website/images/blog/Banner_Toolchain +tags: + - hpc +author: nick-forrington +related: [] + +--- + +The ongoing impact of the COVID-19 pandemic means that more and more scientific research is being conducted by teams working remotely. + +While remote access to compute resources is nothing new, visual tasks such as debugging and profiling can become difficult as network latencies increase, and remote graphics solutions become unresponsive. + +The good news is that the Linaro Forge cross-platform tools suite provides various GUI and command-line methods for remote debugging and profiling, including the following components: + +* [Linaro DDT](https://www.linaroforge.com/linaroDdt/) +* [Linaro MAP](https://www.linaroforge.com/linaroMap/) +* [Linaro Performance Reports](https://www.linaroforge.com/linaroPerformanceReports/) + +Here we describe some of those methods that you can use to quickly get up and running. + +# Linaro Forge remote client + +The Linaro Forge remote client allows you to debug and profile remote jobs, while running the GUI on your local machine. This is faster than remote-X11 (particularly for slow connections) and provides a native GUI. + +The remote client is available for Windows, Mac, and Linux, and can also be used as a local viewer for collected MAP profiles. + +The Linaro Forge remote client will connect and authenticate using SSH, and use existing licensing from your remote compute resource, so minimal setup is required. + +![linaro forge remote launch settings](/linaro-website/images/blog/linaro-forge-remote-launch-settings) + +When you are connected, Forge looks and behaves as it does when running locally, but launches jobs, browses for files, and uses the configuration found on the remote system. + +Additionally, the Reverse Connect feature allows you to easily launch jobs with DDT and MAP using your usual terminal. + +For example, once connecting your remote client, run the following in your remote terminal: + +`ddt --connect mpirun -np 24 ./a.out` + +Or + +`map --connect mpirun -np 24 ./a.out` + +When you execute the DDT or MAP --connect command, a connection is made to your existing remote client, and the specified mpirun command is executed to start up your program. + +This provides a convenient way for the remote client to work with batch systems and avoids the need to tell DDT or MAP about any program parameters, environment variables, or module files required. + +For more information on the Forge Remote Client see: + +* [Forge remote client setup and usage](https://www.olcf.ornl.gov/tutorials/forge-remote-client-setup-and-usage/) +* [Connecting to a remote system](https://developer.arm.com/documentation/101136/2010/Arm-Forge/Connecting-to-a-remote-system) + +# Using Linaro Forge from the command-line + +While the previous section describes how to better use the Forge GUI on remote connections, sometimes working without a GUI can be preferable. + +Not only is this useful when working remotely, it can be useful to drive the tools inside batch scripts, or integrate them into Continuous Integration workflows. + +## Debugging Offline with DDT + +Linaro DDT is best known as an interactive debugger. But whether an unreliable connection makes using a GUI difficult, or you are not sure if you are at your desk when your job is scheduled, offline debugging can be a very useful alternative. 
+ +Offline debugging provides the complete breadth of DDT debugging capabilities but without user interaction, and without using the GUI. + +Instead, DDT generates a report when your job completes, detailed any crashes or areas of interest (specified when launching), along with relevant variables. This mode can simplify the debugging process when you are working in the easily interrupted WFH world. + +For example: + +`ddt --offline mpirun -n 4 PROGRAM [ARGUMENTS]...` + +This shows an example detecting a program crash, highlighting the offending line of code, along with variables. + +![debugging offline with ddt](/linaro-website/images/blog/debugging-offline-with-ddt) + +With command-line arguments or session files, you can define breakpoints, watchpoints, and tracepoints to gather more extensive data. + +# Collecting MAP profiles from the command line + +When you are debugging, it is common to submit runs from inside a debugger. For profiling, the common approach would be to run the program offline, producing a profile file that can be inspected later. To do this, replace your usual program invocation with a MAP command such as: + +`map --profile mpirun -n 4 PROGRAM [ARGUMENTS]...` + +MAP runs without a GUI, gathering data to a .map profile file. + +When you have collected your profile, you can: + +1. Connect the remote client, and browse to open it (see previous) +2. Copy it to your local machine, and open it with the remote client +3. Convert it to a Performance Report (see the following) + +# Characterise performance with Performance Reports + +[Linaro Performance Reports](https://www.linaroforge.com/linaroPerformanceReports/) is a low-overhead tool that produces one-page text and HTML reports summarizing and characterizing both scalar and MPI application performance. + +These reports can be particularly useful when working remotely, because no remote GUI is required – all that is needed is a web browser. + +Reports are generated by simple modifications to your launch command: + +`perf-report mpirun -n 4 PROGRAM [ARGUMENTS]...` + +You can copy the resulting report to your local machine and open it with your web browser. Here you’ll see summary information: + +![performance reports summary information](/linaro-website/images/blog/performance-reports-summary-information) + +As well as sections displaying more detail and advice for CPU usage, MPI, I/O, Threading/OpenMP usage, memory usage, and energy usage. + +![performance reports cpu usage](/linaro-website/images/blog/performance-reports-cpu-usage) + +For more information about performance reports, see: + +* [Linaro Forge user guide](https://www.linaroforge.com/documentation/) +* [Interpreting performance reports](https://developer.arm.com/documentation/101136/2010/Performance-Reports/Interpreting-performance-reports) + +# [](https://developer.arm.com/documentation/101136/2010/Performance-Reports/Interpreting-performance-reports)Summary + +In this article, we have introduced potential solutions to some of the issues faced when debugging and profiling HPC code when working remotely. + +Using Linaro Forge, we have covered how to improve the performance of GUI solutions, by using the remote client rather than generic solutions like X11-forwarding. + +We have also discussed how to bypass the GUI entirely, and use DDT, MAP, and Performance reports non-interactively from the command line, which can help with slow connections, and dealing with busy batch systems. 
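+
+If your site uses a batch scheduler, all three command-line modes can be combined in a single job script. The sketch below assumes a Slurm-managed cluster and a program called ./a.out; the scheduler directives, the module name and the mpirun invocation are placeholders you would adapt to your own system. In practice you would usually run only the mode you need, but the point is that none of these steps require a GUI or an interactive session.
+
+```
+#!/bin/bash
+#SBATCH --job-name=forge-offline
+#SBATCH --nodes=1
+#SBATCH --ntasks=24
+
+# Load whatever environment provides Forge on your system (placeholder name).
+module load forge
+
+# Debug while you sleep: the report is written when the job completes.
+ddt --offline --output=debug-report.html mpirun -n 24 ./a.out
+
+# Collect a MAP profile to open later in the remote client.
+map --profile mpirun -n 24 ./a.out
+
+# One-page performance summary, viewable in any web browser.
+perf-report mpirun -n 24 ./a.out
+```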
+ +[Request free Linaro Forge trial](https://www.linaroforge.com/freeTrial/) diff --git a/src/content/blogs/debugging-memory-tagging-with-lldb-13.mdx b/src/content/blogs/debugging-memory-tagging-with-lldb-13.mdx new file mode 100644 index 0000000..be75305 --- /dev/null +++ b/src/content/blogs/debugging-memory-tagging-with-lldb-13.mdx @@ -0,0 +1,370 @@ +--- +title: Debugging Memory Tagging with LLDB 13 +description: In this blog David Spickett looks at Memory Tagging (MTE) and the + debugging features Linaro has added to LLDB 13. +date: 2022-04-21T11:12:50.000Z +image: linaro-website/images/blog/Banner_Linux_Kernel +tags: + - open-source + - debugging +author: david-spickett +related: [] + +--- + +LLDB is the LLVM project’s debugger (https://lldb.llvm.org/), supporting a wide range of platforms and architectures including Android and Linux running on AArch64. + +Armv8.5-a added the Memory Tagging Extension (MTE) to AArch64 and LLDB 13 is the first version to support debugging Linux applications that use memory tagging. Including: + +* Reading and writing memory tags. +* Detecting memory tagged memory regions. +* Annotating memory tagging faults. + +([https://releases.llvm.org/13.0.0/docs/ReleaseNotes.html#changes-to-lldb](https://releases.llvm.org/13.0.0/docs/ReleaseNotes.html#changes-to-lldb)) + +In this post we’re going to talk about what Memory Tagging (MTE) is and demonstrate the debugging features Linaro has added to LLDB 13. + +# What is Memory Tagging? + +Let’s start with a problem memory tagging is designed to detect. Imagine you have 2 buffers in memory next to each other. You also have a pointer to the first buffer. This is what the memory layout looks like. + +![What is Memory Tagging](/linaro-website/images/blog/what-is-memory-tagging) + +Note that I’ve said “should not” be used. Lots of operations write some value, increment a pointer and loop again. Often the final value of the pointer in those functions can be just beyond the range it was intended for (like a C++ end iterator) or, if there are mistakes or a determined attacker, way off into memory it was never meant to access. + +The core issue is that even if your intent was only to use the pointer to address Buffer 1, in reality there is no enforcement of this. + +For example, you want to read a user name from a configuration file. You read the data as a null terminated string from the file and expect to get 64 characters max, so you allocate a 65 byte buffer (perhaps your frontend only has a 64 character wide input box). + +This all works fine in normal circumstances but what if an attacker crafted their own configuration file with a > 64 character name? What would happen is that as you read the name you’d fill in your 64 character buffer and just keep on incrementing and writing memory until the null terminator was found. + +So now the attacker can overwrite whatever is after that buffer. Sensitive variables, the return address, anything they want. + +Of course there are existing defences and best practices for this. You could use safer versions of standard functions, use a language that encodes bounds somehow, or use safe (safer) containers like we have in C++. Even so, mistakes get made and porting to a new language has its own challenges so it’s not always an option. + +So could we have the hardware check memory bounds for us? Yes we can, and we call it memory tagging. + +Memory tagging allows you to tag areas of memory and tag pointers that should be used to access those areas. 
The tag of the pointer must match that of the memory being accessed. Let’s see how this helps the situation we talked about above. Here’s the same diagram with memory tags added. + +![What is Memory Tagging image 2](/linaro-website/images/blog/what-is-memory-tagging-image-2) + +Now what happens is when the pointer is incremented past Buffer 1 to point into Buffer 2, if someone tries to read or write using it, the access will fail. As we’ve deliberately tagged the memory after Buffer 1 with a tag that doesn’t match the pointer (not shown here but you would also do this to memory before Buffer 1, to prevent underflow). + +Here’s how the hardware implements this. Tags come in pairs: + +* A 4 bit (0-15) “logical tag” found in bits 59-56 of a pointer. Virtual addresses are usually 48 bits (up to 52 with extensions) so these bits are otherwise unused. +* A 4 bit “allocation tag” that is stored in a special section of memory for tag storage. + +![Bit layout of a logically tagged pointer](/linaro-website/images/blog/bit-layout-of-a-logically-tagged-pointer) + +Note: The above assumes you are only using memory tagging. The other unused bits can be used in other ways that are not covered by this article. + +Special instructions are provided to get and set these tags (and they can be modified once set, or “unset” usually meaning set to 0). + +Each pair of tags refers to a “granule” of memory, which is 16 bytes, aligned to 16 bytes. Meaning that bytes 0-15 is a granule, the next granule goes from bytes 16-31 and so on. + +When there is an access to a granule the logical tag in the pointer is compared to the allocation tag of the granule. A mismatch means an exception, stopping the access (how you respond to that exception is up to you). + +![Allocation tag](/linaro-website/images/blog/allocation-tag) + +Note: You don’t have to explicitly give a pointer a logical tag for tag checking to occur. If the location has an allocation tag, a check will be done. Whatever is in bits 59-56 of the address will be used as the logical tag. + +Of course software does have to do the initial tagging. After all, it knows where the buffers are. However compiler code generation and library support greatly reduces the effort needed. + +# An Example Program + +To give context to the new features in LLDB we’re going to first show you an application using memory tagging that has a bug. Then we’ll debug it using the new features. + +The following example: + +* Protects a section of a buffer using memory tagging. +* Accesses that section using a correctly tagged pointer. +* Accesses that section using an incorrectly tagged pointer, causing an exception. + +Note: This is not real code, simply a minimal demo of memory tagging. + +![An example program](/linaro-website/images/blog/an-example-program) + +We’re going to give our incorrect pointer a tag of 2 so it matches none of the allocation tags. + +Note: The program uses ACLE intrinsics that you can learn about at [https://developer.arm.com/documentation/101028/0012/10--Memory-tagging-intrinsics?lang=en](https://developer.arm.com/documentation/101028/0012/10--Memory-tagging-intrinsics?lang=en). + +``` + + +#include +#include +#include +#include +#include +#include + +int main(int argc, char const *argv[]) { + if (prctl(PR_SET_TAGGED_ADDR_CTRL, + PR_TAGGED_ADDR_ENABLE | + // Synchronous tag fault exceptions + PR_MTE_TCF_SYNC | + // Allow all tags to be generated by the addg + // instruction __arm_mte_increment_tag produces. 
+ (0xffff << PR_MTE_TAG_SHIFT), + 0, 0, 0)) { + return 1; + } + + char *mte_buf = + mmap(0, sysconf(_SC_PAGESIZE), PROT_WRITE | PROT_READ | PROT_MTE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (mte_buf == MAP_FAILED) + return 1; + + // We're going to tag 32 bytes with 1. + // First set the logical tag in the pointer. + mte_buf = __arm_mte_create_random_tag(mte_buf, ~(1 << 1)); + // Then the allocation tags. + __arm_mte_set_tag(mte_buf); + // 16 bytes is the tag granule size + __arm_mte_set_tag(mte_buf + 16); + + // Give the incorrect pointer a logical tag of 2 + char *incorrect_tag_ptr = __arm_mte_create_random_tag(mte_buf, ~(1 << 2)); + + // Should work + memset(mte_buf, 1, 32); + // Should fault + memset(incorrect_tag_ptr, 1, 32); + + return 0; +} + + + +Compile with: +aarch64-unknown-linux-gnu-gcc -march=armv8.5-a+memtag main.c -o prog -g + +clang -target aarch64-linux-gnueabi -march=armv8.5-a+memtag main.c -o prog -g + +(MTE was introduced in v8.5-a but is optional, so we have to add “+memtag”) +``` + +# The Result + +``` +$ ./prog +Segmentation fault +``` + +Oh no! Well actually, oh yes! Our example faulted as we intended. Let’s confirm we got things wrong in all the right ways. + +# Catching the Problem in LLDB + +``` +$ ./bin/lldb <...>/prog +(lldb) target create "<...>/prog" +Current executable set to '<...>/prog' (aarch64). +(lldb) gdb-remote <...> +Process 175530 stopped +* thread #1, name = 'prog', stop reason = signal SIGSTOP +<...> +(lldb) c +Process 175530 resuming +Process 175530 stopped +* thread #1, name = 'prog', stop reason = signal SIGSEGV: sync tag check fault (fault address: 0x200fffff7ff9000 logical tag: 0x2 allocation tag: 0x1) + frame #0: 0x0000fffff7ee3e94 libc.so.6`___lldb_unnamed_symbol2690 + 84 +libc.so.6`___lldb_unnamed_symbol2690: +-> 0xfffff7ee3e94 <+84>: str q0, [x0] +<...> +``` + +We have the fault we expected. We tried to use a pointer logically tagged with 2, to access memory allocation tagged with 1. + +# Finding the Root of the Problem + +Let’s look at the instruction that faulted to get confirmation of what LLDB is telling us. You don’t need to do this yourself, this is just to show you what LLDB is automating for you. + +``` +libc.so.6`___lldb_unnamed_symbol2690: +-> 0xfffff7ee3e94 <+84>: str q0, [x0] +``` + +This instruction means “store the data in register q0 starting at the address in register x0”. + +``` +(lldb) register read v0 x0 + v0 = {0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01} + x0 = 0x0200fffff7ff9000 +``` + +Note: We’re using the “v” register name here because LLDB does not know about the “q” form. The result is the same for this purpose. + +In v0 we see the 1 passed to memset repeated so that this store will write 16 bytes. The logical tag in x0 is 2. Exactly what the fault description showed us. + +Going up one stack frame confirms the problem. + +``` +(lldb) up +frame #1: 0x0000000000400778 prog`main(argc=1, argv=0x0000fffffffff498) at main.c:40:3 + 37 // Should work + 38 memset(mte_buf, 1, 32); + 39 // Should fault +-> 40 memset(incorrect_tag_ptr, 1, 32); + 41 + 42 return 0; + 43 } +``` + +We successfully memset using the correctly tagged pointer and failed with the incorrectly tagged pointer. + +For this example the journey ends here, just use the correct pointer. Let’s pretend it wasn’t that simple and see what new features LLDB 13 has to help you dig into the problem. 
+ +# MTE Features in LLDB 13 + +Note: Though AArch64 MTE is the only memory tagging scheme supported by LLDB at this time, the commands are generic. Support could be added for other schemes if there are contributors willing to do so. + +## memory region + +The first thing you might say is “is that memory even tagged?”. Especially if you’re using a library that has enabled tagging seamlessly for you. Let’s check which mapping it’s in. + +``` +(lldb) memory region incorrect_tag_ptr +[0x0000fffff7ff9000-0x0000fffff7ffa000) rw- +memory tagging: enabled +``` + +That last line is a new feature in 13. Here’s an untagged region for comparison. + +``` +(lldb) memory region main +[0x0000000000400000-0x0000000000401000) r-x /mnt/virt_root/mte_blog_post_program/prog PT_LOAD[0] +``` + +## memory tag read + +The fault description gave us the allocation tag of one granule of memory. How do we see what the rest are set to? With a new command “memory tag read”. + +``` +(lldb) memory tag read incorrect_tag_ptr incorrect_tag_ptr+64 +Logical tag: 0x2 +Allocation tags: +[0xfffff7ff9000, 0xfffff7ff9010): 0x1 (mismatch) +[0xfffff7ff9010, 0xfffff7ff9020): 0x1 (mismatch) +[0xfffff7ff9020, 0xfffff7ff9030): 0x0 (mismatch) +[0xfffff7ff9030, 0xfffff7ff9040): 0x0 (mismatch) +``` + +Note: As memory tagging works in granules any address ranges will be expanded to cover those granules. Meaning that reading the tag for 1 byte is the same as reading the tag for all 16 bytes of its granule. + +The output shows us what the hardware was expecting and indeed, none of them are 2. If we read via mte\_buf which is correctly tagged, the markers will change. + +``` +(lldb) memory tag read mte_buf mte_buf+64 +Logical tag: 0x1 +Allocation tags: +[0xfffff7ff9000, 0xfffff7ff9010): 0x1 +[0xfffff7ff9010, 0xfffff7ff9020): 0x1 +[0xfffff7ff9020, 0xfffff7ff9030): 0x0 (mismatch) +[0xfffff7ff9030, 0xfffff7ff9040): 0x0 (mismatch) +``` + +This explains why the first memset worked. mte\_buf is correctly tagged for the first 32 bytes. We now also know that if we had tried to set more than 32 bytes, we would have faulted earlier. + +Note: From the processor’s point of view the two pointers point to the same place because the memory tags are ignored when finding the final location. So in both examples above you are reading the same allocation tags. LLDB is showing you what would happen if running code tried to use those pointers. + +## memory tag write + +So let’s assume we want this program to be successful at any cost. Can we just change the allocation tags to 2? Yes we can. First run until the faulting memset. + +``` +(lldb) b main.c:40 +Breakpoint 1: where = prog`main + 188 at main.c:40:3, address = 0x0000000000400768 +(lldb) c +Process 190332 resuming +Process 190332 stopped +* thread #1, name = 'prog', stop reason = breakpoint 1.1 + frame #0: 0x0000000000400768 prog`main(argc=1, argv=0x0000fffffffff498) at main.c:40:3 + 37 // Should work + 38 memset(mte_buf, 1, 32); + 39 // Should fault +-> 40 memset(incorrect_tag_ptr, 1, 32); + 41 + 42 return 0; + 43 } +``` + +Then use the new “memory tag write” command to update the allocation tags. + +``` +(lldb) memory tag write incorrect_tag_ptr 2 2 +(lldb) memory tag read incorrect_tag_ptr incorrect_tag_ptr+64 +Logical tag: 0x2 +Allocation tags: +[0xfffff7ff9000, 0xfffff7ff9010): 0x2 +[0xfffff7ff9010, 0xfffff7ff9020): 0x2 +[0xfffff7ff9020, 0xfffff7ff9030): 0x0 (mismatch) +[0xfffff7ff9030, 0xfffff7ff9040): 0x0 (mismatch) +``` + +The program should now exit successfully when we continue. 
+ +``` +(lldb) c +Process 175596 resuming +Process 175596 exited with status = 0 (0x00000000) +``` + +The example above uses a start address and a list of tag values. The command can also repeat a pattern of tags across a range. I could have done this instead: + +``` +(lldb) memory tag write incorrect_tag_ptr 2 --end-addr incorrect_tag_ptr+32 +``` + +That repeats the tag 2 as many times as there are granules between the start and end address. This also works for patterns. Here I’m setting the tags of the whole memory allocation to a cycling incrementing pattern. + +``` +(lldb) memory tag write mte_buf 0 1 2 3 4 5 6 7 -end-addr mte_buf+4096 +(lldb) memory tag read mte_buf mte_buf+256 +Logical tag: 0x1 +Allocation tags: +[0xfffff7ff9000, 0xfffff7ff9010): 0x0 (mismatch) +[0xfffff7ff9010, 0xfffff7ff9020): 0x1 +[0xfffff7ff9020, 0xfffff7ff9030): 0x2 (mismatch) +<...> +[0xfffff7ff9080, 0xfffff7ff9090): 0x0 (mismatch) +[0xfffff7ff9090, 0xfffff7ff90a0): 0x1 +[0xfffff7ff90a0, 0xfffff7ff90b0): 0x2 (mismatch) +``` + +Useful if you want to clear tags or set some recognisable default values. + +# Future Work + +## Memory Tags in “memory read” Output + +Note: This is included in LLDB 14, which was unreleased at time of writing (https://releases.llvm.org/14.0.0/docs/ReleaseNotes.html#changes-to-lldb). + +Use the new “show-tags” argument to print out the allocation tags in line with the memory contents. + +``` +(lldb) memory read mte_buf mte_buf+32 -f "x" -s8 --show-tags +0x900fffff7ff8000: 0x0000000000000000 0x0000000000000000 (tag: 0x0) +0x900fffff7ff8010: 0x0000000000000000 0x0000000000000000 (tag: 0x1) +``` + +Note: -f “x” -s8 is not required, just added to print values as 8 byte hex numbers for cleaner results. + +## Memory Tagging in Core Files + +A core file is a dump of the process state at a certain point. Agreement has been reached about the form MTE state will take in these files and a future version of LLDB will support reading memory tags from core files. + +## LLDB API features + +LLDB has a scripting language API which is primarily used with Python ([https://lldb.llvm.org/use/python.html](https://lldb.llvm.org/use/python.html)). + +We have been working on API additions for memory tagging but lack a concrete use case at this time. If you have an interest in API support you may wish to try out the prototype and give us feedback. + +We have a branch [(https://github.com/DavidSpickett/llvm-project/tree/lldb-mte-api-14.0.1)](https://github.com/DavidSpickett/llvm-project/tree/lldb-mte-api-14.0.1) and an LLVM 14.0.1 based release package for AArch64 Linux ([https://people.linaro.org/\~david.spickett/clang+llvm-14.0.1-aarch64-unknown-linux-gnu.tar.xz](https://people.linaro.org/~david.spickett/clang+llvm-14.0.1-aarch64-unknown-linux-gnu.tar.xz)) that includes the changes. + +(note that the branch includes the example scripts, so you will need that even if you’re using the prebuilt files) + +Either way, if you have an interest in memory tagging features for scripting or IDE support please contact us (linaro-toolchain@lists.linaro.org). + +For more information on LLVM toolchain work at Linaro see [https://linaro.atlassian.net/wiki/spaces/LLVM/overview](https://linaro.atlassian.net/wiki/spaces/LLVM/overview). 
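+
+As a closing practical note: if you want to try the example program from this post on your own hardware or emulator, it is worth checking first that the kernel actually exposes MTE. The small, self-contained check below uses standard Linux hwcap probing and is not specific to LLDB; the fallback definition of HWCAP2_MTE mirrors the value in the Linux UAPI headers.
+
+```
+#include <stdio.h>
+#include <sys/auxv.h>
+
+#ifndef HWCAP2_MTE
+#define HWCAP2_MTE (1 << 18) /* value from the Linux UAPI hwcap header */
+#endif
+
+int main(void) {
+  if (getauxval(AT_HWCAP2) & HWCAP2_MTE)
+    printf("MTE is supported on this system\n");
+  else
+    printf("MTE is NOT supported on this system\n");
+  return 0;
+}
+```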
diff --git a/src/content/blogs/debugging-while-you-sleep-using-linaro-ddt.mdx b/src/content/blogs/debugging-while-you-sleep-using-linaro-ddt.mdx new file mode 100644 index 0000000..f5221c5 --- /dev/null +++ b/src/content/blogs/debugging-while-you-sleep-using-linaro-ddt.mdx @@ -0,0 +1,64 @@ +--- +title: Debugging while you sleep using Linaro DDT +description: In this blog we talk about how to debug offline using Linaro DDT. + Read more here! +date: 2018-04-24T11:57:38.000Z +image: linaro-website/images/blog/DataCenter +tags: + - hpc +author: beau-paisley +related: [] + +--- + +I like sleep. Sleep is good. With plenty of sleep I address the day with vigor, and approach the duties of the day with zeal. So, when I discovered how to debug while I slept I was excited and I think that you will be excited too. + +# How to debug offline with Linaro DDT + +Linaro DDT enables you to debug while you sleep using offline debugging. Offline debugging provides the complete breadth of DDT debugging capabilities but without user interaction and without using the GUI. You run your application as usual either through your batch system or directly, and instead of interacting with the GUI DDT will generate a report upon completion of your job. [Download documentation](https://www.linaroforge.com/documentation/) for offline debugging. + +You can control offline debugging with command line arguments and by providing a session file. Specifying breakpoints, watchpoints, and tracepoints on the command line can be very useful when you are scripting many runs for a parameter type diagnosis or setting up offline debug results as part of your regression testing. It can be simpler and more expedient, however, to create a session file that specifies the features that you want. So, let’s start up DDT and create a session file. + +![ddt session file example](/linaro-website/images/blog/ddt-session-file) + +For this illustration I am going to use a simple example that uses OpenMP and MPI to parallelize the calculation of pi. At this point we are just using the user interface to generate a session file, we are not really running in earnest, so I will start with one thread and one MPI process. When we go to submit the job for debugging you can run in a different configuration, typically much larger scale. For my first run I want to set a breakpoint right after all the MPI setup boiler plate and see that everything looks fine. + +![example using openmp and mpi](/linaro-website/images/blog/example-using-openmp-and-mpi) + +The GUI shows us that we have a breakpoint on line 70. Now I have a loop a little farther down that I want to study. From the following dialog we can see that I am going to output the value of sum on line 98. The number of iterations, n in this loop is rather large so I am only going to log the first five iterations, that should be enough for a first look. + +![gui](/linaro-website/images/blog/gui) + +Now let’s save these specifications in a session file. From the File menu select Save Session and provide a file name. + +![session files](/linaro-website/images/blog/session-files) + +I chose the name cpi.session. This is just a simple xml file that will be provided to DDT to specify the breakpoint and tracepoint we created. Now let’s exit the GUI and submit our offline debugging session. + +![cpi session](/linaro-website/images/blog/cpi.session) + +As I did not specify an output file name, DDT chose one for me. Using the –output flag you can specify your own filename. 
By default, DDT will create the output in HTML format. If, however, you specify a file name that does not have the suffix .htm or .html DDT will create a CSV format file. This is much more compact and amenable to post processing with tools like sed, grep, awk, etc. But, let’s save that for another day and have a look at our output. + +![cpi-mpi logbook](/linaro-website/images/blog/cpi-mpi-logbook) + +First thing, much like the logbook in DDT, the offline run records the date of your run and build date of the executable. This is good info to help stay organized when working on challenging bugs even if you are getting plenty of sleep. Now let’s scroll down and have a look at what info we got for that breakpoint we set. + +![offline run records](/linaro-website/images/blog/offline-run-records) + +A wealth of information, like the DDT GUI. A snapshot of the current stack and view of all the variables with sparklines to show their values across processes. And, scrolling down to the sparklines section of the output we can see the tracepoint showing the sparkline and min max values for the variable sum for the first five iterations of the loop. + +![tracepoints](/linaro-website/images/blog/tracepoints-debugging-blog) + +Memory debugging was enabled in our run with the –mem-debug flag. We don’t suspect any memory leaks in this code, but it is usually easier to fix those types of problems sooner rather than later, so it is good practice to always enable memory debugging. The overhead of the default usage of memory debugging in DDT is very low and you are probably sleeping when your offline run executes, so why not! + +![memory leak report](/linaro-website/images/blog/memory-leak-report) + +And, continuing to the bottom of the output we get the actual output of our run. + +![debugging output](/linaro-website/images/blog/debugging-output) + +This post has been a whirlwind overview of just one of the use cases for offline debugging with DDT. I invite you to visit the documentation reference below to investigate many of the other ways to take advantage of offline debugging. + +[Documentation for offline debugging](https://www.linaroforge.com/documentation/) + +[](https://www.linaroforge.com/documentation/) diff --git a/src/content/blogs/device-tree-future-improvements.mdx b/src/content/blogs/device-tree-future-improvements.mdx new file mode 100644 index 0000000..7dd8e0c --- /dev/null +++ b/src/content/blogs/device-tree-future-improvements.mdx @@ -0,0 +1,72 @@ +--- +related_project: + - DTE +title: "Device Tree: Future Improvements" +description: In this article, Joakim Bech gives an overview on what exactly + Device Tree is and what it does as well as its future improvements. Read more + here! +date: 2020-10-23T10:08:40.000Z +image: linaro-website/images/blog/devicetree-logo_vertical-devicetree +tags: + - arm + - linux-kernel +related_projects: + - DTE +author: joakim-bech +related: [] + +--- + +## What is Device Tree? + +Device Tree is a data structure used to describe hardware. Instead of hard coding every detail of a device into an operating system, aspects of the hardware can be described in a data structure which is then passed to the operating system at boot time. + +## Device Tree - Future improvements + +Device Tree has been around for a long time and is a well known technology for engineers working with embedded devices. 
One of the key goals of Device Tree was to separate SoC-specific settings into their own configurations, in a way that would make it possible to run a generic kernel (Linux kernel) and provide different Device Tree Blobs (the name that refers to the compiled form of a Device Tree configuration, DTB for short) for different hardware configurations. Originating from [Open Firmware](https://www.kernel.org/doc/html/latest/devicetree/usage-model.html#history), Device Tree was picked up by the Linux kernel roughly fifteen years ago, as an effort to try and sort out what at the time was a rather [messy](https://lkml.org/lkml/2011/3/17/492) configuration of Arm devices. Today, you will find hundreds of DTS-files in the Linux kernel tree for all sorts of devices coming from a plethora of SoC manufacturers. In Linux kernel v5.8 there are 1833 \*.dts files, which shows that the Device Tree approach to doing device configuration in the Linux kernel has been pretty successful. + +With this clearly being such a well-established technology and having configurations for so many devices, is there any problem with it? Our embedded devices seem to boot up fine using this approach. As with other technologies, things are constantly evolving, and a technology that was well designed and fully working a couple of years ago might be in need of updates to fit better with the systems which are available and being used today. This is what Linaro, along with its members, has seen with respect to the Device Tree -- it’s not just about evolving a working technology to work with future systems, it’s also about improving usability, enhancing security, etc., which are always ongoing efforts. + +## Enter firmware + +In a typical setup the DTB (Device Tree Blob) files are provided to the Linux kernel by a boot loader, such as U-Boot. However, U-Boot is more or less in the same situation as the Linux kernel, i.e., there is a need for a device configuration for U-Boot also. It doesn’t stop there either: boot loaders such as U-Boot and the Linux kernel are almost always last in the boot chain. Today it’s not uncommon to see other components running before both U-Boot and the Linux kernel. On Armv8-A systems you run some ROM code (which can be based on Trusted Firmware-A, BL1), a first-stage boot loader (TF-A, BL2) and TEEs (like OP-TEE). These firmware components use far less device configuration, but there is still a need to read device configuration information in these firmware components also. We’ve also seen that it’s not uncommon for these components to make runtime modifications to the Device Tree in memory. It could change a value, add a node, remove a node and so on (think the “[chosen](https://www.kernel.org/doc/Documentation/devicetree/bindings/chosen.txt)” node). + +It is important to point out that firmware isn’t just a boot thing. Some boot components, like the secure monitor and TEEs, are firmware that run all the time alongside the main operating system. Still, these are not entities running “on their own”, but rather they are vital parts of the entire and coherent system, i.e., all components must have knowledge about each other. For example, take a memory carve-out that the TEE needs. The Linux kernel must in some way get notified that the TEE has taken a chunk of memory. Today this happens via Device Tree. If that didn’t take place, then the Linux kernel would have no clue that someone else is using a chunk of memory and eventually would try to use it. + +## A fragmented community?
+ +When a firmware or boot loader engineer would like to use Device Tree in their firmware, what should they do? Use the DTS (Device Tree Source) files in the Linux kernel? Patch those and upstream the changes made? Or copy existing DTS-files, make modifications and save them locally in that tree? Unfortunately there is no clear answer to that and we see a mix of these strategies employed. This is a bit unfortunate, since developers in the worst case would need to edit just as many DTS-files as there are copies of them. The ones who are doing that are the “good citizen engineers”. However, it seems more common that engineers just update the DTS-file in the project they are working on and forget about the rest. Maybe they don’t even know where all the DTS copies reside? + +In short, the Arm embedded community has, without realizing it, solved one problem (the “Arm Linux mess” in the Linux kernel), but in doing so has created another problem, i.e., a fragmented Device Tree ecosystem. This means that engineers and system architects have to deal with questions like: Where are the DTS files I need to change? How can we ensure the Device Tree configuration is in sync? How should we pass the DTB between firmware components and the main OS? How can we ensure the integrity of the DTB in memory? How can we detect runtime modifications to the DTB in memory? How do we know that the runtime modifications follow the rules? What are the rules? How should we write bindings that work across software projects? + +## The train has left the station? + +A fair question to ask is whether there actually is a need to synchronize DTS files. Maybe there isn’t most of the time, as long as the component itself can do what it is supposed to do and can then hand over whatever the next runnable expects. Likewise, without DTS we would have had the same problem to address, just in another way. Having said that, I’d personally prefer having it all nicely packaged at a single location for a couple of reasons. It makes it simple to find and easy to share and re-use already existing DT configurations when starting a new (firmware) project. For security reasons it would help a lot to have it all in a single place, since it’d be easier to apply common security schemas in various firmware components. + +As an exercise to see what it would take to compile DTB(s) from a single DTS source, we took the iMX8MQevk device and looked up the dts-files for it in U-Boot and the Linux kernel. The root dts file, “imx8mq-evk.dts”, is fairly populated with various configurations and it also includes “imx8mq.dtsi”, which in turn includes a couple of header files. These files exist both in U-Boot and in the Linux kernel, i.e., nothing is shared between the projects. At first glance they look “similar”. However, when running a diff-tool, it’s immediately clear that there are lots of differences and that the differences affect many different areas. Without expertise in all these areas it’s quite a daunting task to try to come up with correct merge decisions, i.e., the likelihood of breaking something is pretty high, which in turn could take quite some time to sort out, since the compile, flash, boot-up turnaround time isn’t negligible. So, even when focusing our efforts only on a single device, this turns out to be a pretty complex and challenging task. We should also remember that we were using a combination of two stable git tags, one in U-Boot and one in the Linux kernel.
In reality, there could be as many combinations as there are branches and tags in each of the git trees. + +Considering that there are more than 1800 dts-files in the Linux kernel and more than 1100 dts-files in U-boot, and that both projects are fast moving in terms of code changes. Add to the equation the other firmware projects starting to use Device Tree and I think it’s fair to say that trying to merge all dts files to a common project/git is an impossible utopia, i.e., the train has left the station a long time ago. Maybe for a new SoC it’d be possible to do something about it, but for existing devices it seems to be game over. + +## A turtle race + +The things discussed in this article to this point are just a few of the questions that Linaro is working with members and external contributors on the Device Tree Evolution project to try and address. As probably everyone can appreciate, addressing questions like these is not an easy task. It will affect a lot of projects, git trees, build systems, regression testing setups, and there are many people affected both directly and indirectly by any changes that might be proposed. The technical aspect of it is pretty challenging, since the decisions cannot be made by a single community, instead decisions have to be made across different communities and here we’re not talking about small projects. We’re talking about projects like the Linux kernel, U-Boot, Trusted-Firmware, AOSP, TEEs, Xen, KVM etc. On top of this already challenging task there are the normal politics that take place in one way or another in most communities. Internal disagreements within a community, disagreements between different projects and communities, etc. + +Over the years there have been countless discussions around Device Tree, discussions taking place at mailing lists, conferences, meetings between maintainers, meetings between consortiums, companies and so on, so it’s not a problem that has gone unnoticed. Still the impression seems to be that little is actually happening, why is that? + +For the discussions that I’ve personally been part of, people often seem to share a common view of the problems. However, for every single question raised, there are often many answers and often also, many new questions raised by that proposed approach. How can there be so many answers? Consider the number of combinations you would end up with simply by considering: hardware capabilities, hardware constraints, software to use, software configurations, and different use cases within configurations. You will recognize how quickly it becomes overwhelming to consider all use cases and combinations. A subject matter expert in one area might give a perfectly acceptable answer for their use cases, but that same answer could be a showstopper for another engineer working with other use cases. This is not a new nor unique problem and it happens all the time within projects, but there you would typically have a maintainer being able to give the final say. What is unique with Device Tree is it affects multiple projects, multiple maintainers with multiple combinations of hardware and software (configurations). + +Linaro is working with many SoC vendors and we have maintainers in the projects we’ve been discussing in this article. Since Linaro is a neutral player in the Arm ecosystem, we believe that Linaro is a perfect organization to address challenging problems like the ones described here. We organize and run meetings and discussions. 
We propose work that we believe would improve the Arm ecosystem. + +## So, what should we do with the Device Tree? + +From a security perspective we believe that there is more we can do. If we would have had a single DTS and single DTB shared by all components, then life would have been pretty easy. But as concluded, after discussing and investigating the “common repo” for Device Tree, it looks like we have to see what can be improved from a security perspective when we use Device Tree as we’re using it today. At best the Device Tree (DTBs) are protected with signatures. However, that doesn’t protect against successful runtime attacks taking place after the signature has been verified. We’ve seen that [Fault Injection](https://en.wikipedia.org/wiki/Fault_injection#Hardware_implemented_fault_injection) attacks have become a mature attack vector and therefore we should try to step up the security a notch. It could be anything from doing best practice when it comes to software mitigations, to measured boot where you compute a running hash of the firmware, to runtime integrity verification. Here we have challenges with Device Tree Overlays and other ways to modify Device Tree in runtime. We’ve been debating whether we shall suggest “Device Tree Security Profiles”, which could range from no security to the highest level of security where all the bells and whistles are enabled. + +Another thing we’ve discussed is how to deal with the runtime modifications and the handover of Device Tree when the DTB is in memory and passed between firmware. Right now it seems a bit ad-hoc and it would be good to document a consistent approach for how that should be done. + +At the Jira [page](https://projects.linaro.org/projects/DTE/) for the [Device Tree Evolution](https://www.linaro.org/projects/#DTE) project you will find these two topics in addition to +10 other areas that we’re currently working on. If you’re interested in Device Tree, we urge you to join the Device Tree Evolution project by starting to attend the meetings taking place twice a month. + +**Author: Joakim Bech, Distinguished Engineer, Linaro** + +Joakim is currently a Distinguished Engineer at Linaro and has been a Linux user for more than 15 years where he spent most of the time in his professional [career](https://www.linaro.org/careers/) working with security on embedded devices. Joakim started up the Security Working Group in Linaro in 2013 and was the lead for that team until 2020. Before joining Linaro he had various roles such as architect, team leader and development engineer. + +For more information on Linaro and the work we do, do not hesitate to [get intouch](https://www.linaro.org/contact/)! diff --git a/src/content/blogs/dma-buf-heap-transition-in-aosp.mdx b/src/content/blogs/dma-buf-heap-transition-in-aosp.mdx new file mode 100644 index 0000000..83d1d24 --- /dev/null +++ b/src/content/blogs/dma-buf-heap-transition-in-aosp.mdx @@ -0,0 +1,68 @@ +--- +title: DMA BUF Heap Transition in AOSP +description: > + In this article, John Stultz takes a detailed look at the DMA BUF Heaps + interface that is designed to replace ION. Read about his findings here! 
+date: 2020-11-17T03:44:32.000Z +image: linaro-website/images/blog/tech_background__under_2mb +tags: + - android +related_projects: [] +author: john-stultz +related: [] + +--- + +## DMA BUF Heaps to replace ION + +With the DMA BUF Heaps interface (designed to replace ION) now upstream, work is quickly happening to migrate both AOSP and vendor ION usage and heap implementations to DMA BUF Heaps. + +The ION framework, originally written by Rebecca Schultz Zavin, was one of the early (\~2010) components of the Android patchset and it provided a way for userspace to allocate memory buffers that would be efficiently shared between multiple devices. The primary benefit of this over other DRM device allocators was the idea that userspace best understands the path of a buffer through the system. For example one buffer might be used for a camera pipeline: + +Camera->ISP->GPU->Display + +Whereas another might simply be an image displayed by an application: + +CPU->GPU->Display + +The trouble is each of the devices in the pipeline may have different constraints: A display may only be able to use contiguous memory buffers, or an ISP may only be able to address 32bits of memory directly. No single device driver understands the possible paths a buffer might take, so the drivers also cannot understand the constraints that devices in the path may have. But if one wants to share a single buffer between all the devices in the pipeline, one needs to make sure it satisfies all the constraints of that pipeline. The ION approach left it up to userland (using the device specific gralloc library) to understand the constraints of various pipelines on a device that a buffer may be used for, and thus it could allocate from a specific ION heap that satisfied those constraints. + +Rebecca was also involved in the early discussions around creating DMA BUF, a generic fd-based handle to a memory buffer, and ION was one of the first users of DMA BUFs when they landed upstream. ION was later added to staging in 2013, and in the following years Laura Abbott maintained it and worked to address issues that the community had with its design. Unfortunately, vendors using ION were not very active in working with the community on ION, so it was difficult as upstream changes were made by the community for vendors to keep in sync. Some upstream changes caused ABI breaks (which is allowable in staging as part of upstreaming), which later caused vendor pain. But instead of participating in finding a good upstream solution, often vendors just reverted upstream changes and shipped older versions of ION in their products. + +## New Interface - DMA BUF Heaps + +Then in early 2019, Andrew Davis started a push to clean up some of the outstanding issues, and I joined in. Rather than changing the ION interface, and causing another ABI break, we introduced a new interface: DMA BUF Heaps. + +The implementation was very minimal. Part of the problem with ION was that it did a lot of things in core infrastructure, and getting the community to agree on all of it was difficult. So we just focused on the allocation interface. This meant, rather than having a lot of common heap logic implemented in the ION core, the DMA BUF heap drivers were fully responsible for their implementations, only sharing the allocation interface. + +Also, instead of having one chardev and specifying which heap to allocate via a heap-mask or heap-id, we instead went with the idea of simplifying it further and having one chardev per heap. 
This allows for better access-control using sepolicy (instead of having to provide a blanket permission to /dev/ion), avoids any limitations in the number of possible heaps (originally ION was limited to 32 heaps), and allows for more descriptive naming than simple enumeration. This is also helpful because, with ION, many vendors used the same heap-id number for very different heaps, making userland implementations incompatible. Further, we avoid having to create a heap querying interface, and can simply use the directory file names for discovery. + +Andrew also implemented two initial heap drivers: the system heap and a CMA (contiguous memory area) heap. These mapped very closely to the ION heaps in staging, but were greatly simplified to help with community review. This did mean some of the optimizations done in both the ION infrastructure as well as in the ION system heap driver were dropped. This included uncached buffers, large page allocation, page pooling and deferred freeing. The CMA heap was closer, but only added the default CMA region rather than adding all CMA regions (as some drivers expect exclusive management of their region, so exporting it to userland might break those assumptions). + +## Migrating ION users to DMA BUF Heaps + +After many cycles of submission and rework the patches were finally merged in Linux v5.6. At that point, we started efforts to migrate ION users to DMA BUF Heaps. The HiKey and HiKey960 gralloc implementations in AOSP were the first to switch over from the ION implementation to DMA BUF Heaps. Then Hridya Valsaraju at Google implemented a helper library called [libdmabufheap](https://android.googlesource.com/platform/system/memory/libdmabufheap/) which provides helper functions for userland to allocate from DMA BUF Heaps as well as supporting compatibility mappings to ION Heaps, so the same code can work on both newer kernels with DMA BUF Heaps as well as older kernels with ION. + +However, one area that was blocking the immediate removal of ION upstream was that the codec2 media framework in AOSP was directly using ION, and without it the system could not even boot. With Hridya’s help, we implemented a new DMA BUF Heap allocator backend for codec2, along with further changes to libdmabufheap as well as other parts of the Android system, and after a number of review and rework cycles, the patches were merged, breaking AOSP from its hard dependency on ION. + +AOSP being able to boot and function properly without ION is a big milestone! And with patches to remove ION from the staging directory in the upstream kernel already queued to land in v5.11, the migration from ION to DMA BUF Heaps is in full swing, and vendors are already starting to port their ION heaps over to DMA BUF Heaps. However, as the upstreamed DMA BUF Heaps is in many ways simpler than ION, there are still some outstanding features that are missing that we’re working to address. + +Patches by Linaro to provide an uncached-system heap, along with large-page allocation, and other cleanups have already been [submitted upstream for review](https://lore.kernel.org/lkml/20201017013255.43568-1-john.stultz@linaro.org/). Deferred-freeing and page-pooling are still TODOs for the system heap, with the key benefit of deferred freeing and page pooling being that they push the work of zeroing buffers off into a non-performance-critical codepath. + +## Zeroing buffers + +Related to that, there has been some interest in heaps that completely avoid zeroing buffers.
Now it would be a very bad idea to pass a buffer to userland that hasn’t been initialized, but zeroing buffers that userland immediately passes to a device to fill is quite wasteful. This is a major tradeoff with the DMA BUF Heaps design, as drivers that allocate their own memory can quickly allocate an uninitialized buffer and have the device fill it before passing it to userspace. Whereas, if userland allocates the memory, we must clear the buffer so they don’t accidentally get access to stale kernel or other process data. Thus having some way to allocate buffers which may never be userland accessible (similar to some secure heap implementations) or finding some way to lazily zero uninitialized buffers only when userland tries to first access it would be very useful. + +Hridya Valsaraju has also been working on patches to enable better DMA BUF tracking and accounting statistics. This will help vendors to better be able to debug issues, as when sharing lots of buffers it is easy to lose track of things and waste memory. + +Additionally functionality like [exposing multiple CMA heaps](https://lore.kernel.org/lkml/1594948208-4739-1-git-send-email-hayashi.kunihiko@socionext.com/) have been submitted upstream by Kunihiko Hayashi. Additional changes to enable [heaps as modules](https://lore.kernel.org/lkml/20191025234834.28214-1-john.stultz@linaro.org/) have also been submitted upstream. But with both of these changes, we don’t yet have any upstream users of such functionality, so for now these must stay out of tree, and are likely to be carried in the Android kernel until vendors can submit their heaps upstream. + +Additional changes to provide in-kernel allocator accessors have been included in the Android tree to match ION’s functionality. However, there is still some question as to if this is really a valid use case. This is because if a driver is using an in-kernel interface to allocate a DMA BUF, it is inherently constraining the use of that buffer, as it is not aware of where that buffer may go next. At the same time, it seems silly to have every driver re-implement a DMA BUF exporter in order to provide DMA BUFs to userland, so being able to share existing heap implementations may be reasonable. But again, we need to see the driver implementations using those interfaces being pushed upstream before any such functionality could be included into mainline. + +## Participation from vendors on DMA BUF Heaps + +ION is quickly fading into the sunset, but there is still a fair amount of work to do on DMA BUF Heaps. A common theme here is that we need more participation upstream from vendors on DMA BUF Heaps. Without active input and code submissions upstream from vendors using the interfaces, we do not have a sense of what changes are important for this new subsystem. There is a risk that changes made on a theoretical basis could result in practical performance issues on devices, causing additional work for vendors adapting to the new functionality. I’d like to avoid that, but we need to hear from vendors upstream on what is working and what isn’t. Further, we are limited to what we can push upstream by what upstream users we enable. For this reason, we very much need to have active vendor participation upstream, directly submitting changes, new heaps, and users of such code to the list. + +## About the Author + +John is a developer in the Linaro Consumer Group, focusing on getting Android functionality upstream into the mainline Linux Kernel. 
He has also been working to support devboards like the HiKey960 and Dragonboard 845c in AOSP. diff --git a/src/content/blogs/dragonboard-845c-in-aosp.mdx b/src/content/blogs/dragonboard-845c-in-aosp.mdx new file mode 100644 index 0000000..5ed9e53 --- /dev/null +++ b/src/content/blogs/dragonboard-845c-in-aosp.mdx @@ -0,0 +1,48 @@ +--- +title: DragonBoard 845c in AOSP +description: > + In this article, John Stultz takes a detailed look at the DragonBoard 845c in + AOSP (Android Open Source Project). Read about his findings here! +date: 2020-04-06T00:48:45.000Z +image: linaro-website/images/blog/48806078402_a2756594c6_k +tags: + - android + - linux-kernel +author: john-stultz +related: [] + +--- + +Over the last year, the Linaro Consumer Group (LCG) has been actively working with the Qualcomm Landing Team and the Google Android Systems Team to get the DragonBoard 845c added as one of the AOSP supported devboards (similar to HiKey, HiKey960 and the Beagle X15). The Qualcomm Dragonboard 845c board is based on the 96Boards Consumer Edition and is an official Android Reference Board. + +One of the most exciting parts of the DragonBoard 845c is that it supports the freedreno graphics driver and mesa, which means the board has a fully open source graphics stack! This avoids the proprietary blob drivers, which while very common in the Android ecosystem, are a constant pain point for adapting to newer kernels and Android revisions. This also means that the board functionality can be completely upstreamed into the mainline kernel, which makes the board a very interesting test target for validating the mainline kernel and stable updates. + +![class=medium-inline DragonBoard 854c 96Board](/linaro-website/images/blog/db845cblog1) + +Another unique aspect of the board is how the Qualcomm Landing Team is enabling functionality for the board. With most devboards, when they ship there is a BSP kernel package (usually containing hundreds if not thousands of patches, against an old kernel version) that provides support for all the board functionality. That BSP tree is usually then forward ported against mainline kernels and some components are then upstreamed, slowly shrinking the stack, though usually not as much as was hoped for, leaving a handful of patches to be continually rebased onto newer kernels. + +![class=medium-inline right DragonBoard 854c 96Board](/linaro-website/images/blog/db845cblog2) + +There may have been such a BSP with the DragonBoard 845c, but the Qualcomm Landing Team started sharing an integration branch that contained all of the work-in-progress patches to enable functionality that they were actively upstreaming against the most recent kernel release. This means initially the board functionality was pretty bare bones. It would boot to UI, USB and ethernet worked - but *Bluetooth*® wireless technology, WiFi and audio were all missing. That said, the set of patches in that tree were usually under thirty, and actively shrinking. And as we’ve moved forward to newer kernels, we see additional functionality showing up, as part of the upstream kernel. This has at times made it feel like progress was moving more slowly, but the extremely valuable aspect is by using this upstream-first approach, we don’t have the technical debt of upstreaming looming overhead, and the board is useful for mainline testing right away. So a big thanks to the Qualcomm Landing Team for their efforts here! It ensures the board will be very valuable for upstream kernel testing for a long time. 
+ +As for AOSP support, the board has technically been a part of AOSP since last October (right before last year’s Linaro Connect San Diego 2019). However, not as much attention was called to it, since we’ve taken this upstream-first approach with the kernel. How we’re handling the kernel in AOSP with the DragonBoard 845c is really new as well. Instead of providing a vendor-specific kernel tree, we integrated the board kernel support directly into the android-mainline and android-5.4 kernel source trees as those patches were being pushed upstream. This however meant that initially there was no tree from which we could build an official pre-built kernel until after the v5.4 release, making AOSP builds a bit incomplete. So it was not until January that we finally had a prebuilt kernel (built directly from the android-5.4 tree by Google’s build infrastructure) to add to the project. With this, the build process is now very similar to the classic AOSP dev board experience, providing a booting device “out of the box” with AOSP/master, with the added benefit of not having to keep a separate vendor tree in sync with changes as they land in android-5.4 or android-mainline. + +After we got support merged into the AOSP branches, the DragonBoard 845c has also been included in the ci.android.com build testing for both [android-5.4](https://ci.android.com/builds/branches/aosp_kernel-common-android-5.4/grid?) and [android-mainline](https://ci.android.com/builds/branches/aosp_kernel-common-android-mainline/grid?), as well as AOSP/master build testing. In fact, one can easily flash and test the very latest [AOSP/master](https://ci.android.com/builds/branches/aosp-master/grid?) builds from ci.android.com on a DragonBoard using their web browser by visiting [flash.android.com](https://flash.android.com/welcome?continue=%2Fcustom) and simply following the instructions! + +Having the DragonBoard 845c support added directly to the android-5.4 and android-mainline kernels has made the board particularly useful for some of the recent [Android Generic Kernel Image (GKI) efforts](https://www.linuxplumbersconf.org/event/2/contributions/61/attachments/69/80/Android_and_Linux_Kernel__Herding_billions_of_penguins_one_version_at_a_time.pdf). The GKI will eventually allow a variety of devices from different SoC vendors to be able to share the same kernel image, enabling the vendor-specific hardware via loadable modules. This allows the ownership of the kernel to be split so that vendors can provide driver modules for their device, but the core kernel could eventually be updated directly by Google to provide quick security fixes - very similar to the [Android Generic System Image (GSI)](https://developer.android.com/topic/generic-system-image). + +![class=medium-inline DragonBoard 845c 96Board](/linaro-website/images/blog/db845cblog3) + +The DragonBoard 845c, along with HiKey960 and HiKey, was used for early proof-of-concept work on the GKI ([demoed at Linaro Connect San Diego 2019](https://twitter.com/johnstultz_work/status/1171915205548183553)), and as the GKI details have been formalized, the DragonBoard 845c has been the first AOSP device able to have its default kernel image be a GKI kernel built by Google’s build infrastructure. This effort required a lot of collaboration between Google, Linaro’s Consumer Group and the Qualcomm Landing Team in order to be able to get all the needed functionality built and functioning properly as a module.
As additional functionality is upstreamed (like Bluetooth®, wifi and audio), we are able to validate those changes with the GKI images from android-mainline tree. + +![class=medium-inline right DragonBoard 854c 96Board](/linaro-website/images/blog/db845cblog4) + +The efforts on the DragonBoard 845c have also been very useful in enabling some form-factor devices with AOSP and the upstream kernels. Since it shares the same SoC as many popular devices, such as the Google Pixel 3 and the POCOPHONE F1, we have been able to share effort to bring up both of those devices as well. + +As mentioned earlier, we are using an upstream-first approach to the board support, so there is still a fair amount of work in progress. Upstream Bluetooth® support recently was enabled, WiFi support was also upstreamed and support was added to the android-mainline kernel. There is also active work on upstreaming audio support, which due to complex dependencies needs some additional effort to get it working when loaded from modules to work with the GKI. + +The upstreaming status for the DragonBoard 845c is quite good! There is only one patch outstanding that is needed to get the board booting. After that it's about three dozen patches to enable USB, PCIe, HDMI bridge, and Audio - almost all of which have already been submitted to lkml and a good many are likely to land in the next merge window. We are excitedly and aggressively pushing to have the board fully supported upstream before the next LTS. + +All of this to say, the DragonBoard 845c is a really exciting device for doing development and testing with AOSP/master along with the latest upstream and LTS -stable kernels. If you are interested in trying it out, you can find instructions here: [https://source.android.com/setup/build/devices](https://source.android.com/setup/build/devices) + +See also the 96boards.org page for more information on the board and how to order one: [https://www.96boards.org/product/rb3-platform/](https://www.96boards.org/product/rb3-platform/) diff --git a/src/content/blogs/enabling-uefi-secure-boot-on-u-boot.mdx b/src/content/blogs/enabling-uefi-secure-boot-on-u-boot.mdx new file mode 100644 index 0000000..a4f6b4b --- /dev/null +++ b/src/content/blogs/enabling-uefi-secure-boot-on-u-boot.mdx @@ -0,0 +1,288 @@ +--- +title: Enabling UEFI Secure Boot on U-Boot +description: > + In this article, Takahiro Akashi looks at how UEFI Secure Boot on U-Boot works + and what it is designed to protect you against. Read about it here! +date: 2020-09-28T01:15:38.000Z +image: linaro-website/images/blog/tech_background_1 +tags: + - qemu + - linux-kernel + - arm +author: takahiro-akashi +related: [] + +--- + +U-Boot is a favorite boot loader for embedded devices, supporting a variety of architectures and platforms. In the last few years, a number of new UEFI interfaces have been brought into U-Boot, and the latest element added is Secure Boot. How does it work and what is it designed to protect you against? + +## UEFI U-Boot + +UEFI (Unified Extensible Firmware Interface)\[1] is the specification developed by UEFI Forum to standardize interfaces between firmware and the OS's, aiming to replace legacy BIOS on PC architecture. + +Nowadays UEFI is everywhere. It has been the default on PC and server side, so now is on arm64 platforms. 
While U-Boot is still popular in the embedded world, supporting generic interfaces like UEFI will make it much easier for users to bring a wider range of OS distributions to their platforms with minimal effort and no customization. Remember that grub can support U-Boot's own APIs, but only on the arm port. No distributions support it on arm64 or x86. + +Accordingly, a huge amount of effort has been devoted to developing UEFI interfaces on top of the U-Boot framework since 2016. Linaro has participated in this community activity since 2018 and worked together to help improve the functionality as well as the quality. (At Linaro we focus on the arm ecosystem, but those developments benefit other architectures as well. It might be worth mentioning that, in the latest release, risc-v was added to the list of supported architectures along with arm and x86.) + +To further strengthen interoperability (and hence compatibility with existing implementations like EDK-II), UEFI U-Boot now reinforces its development goal of fully committing and adhering to EBBR (Embedded Base Boot Requirements)\[2]. EBBR is a collective document being developed by the community. It defines a set of requirements that the firmware on embedded devices should follow to enable standard OSs to be installed without customization. + +At the time of writing this article, UEFI U-Boot provides: + +* most boot-time services (before the OS starts) +* a limited number of runtime services (after the OS starts) +* a subset of relevant protocols (block devices, console, network etc.) +* a minimal boot manager + +There are still plenty of missing features and restrictions, but the functionality is now mature enough to run software like: + +* EDK-II shell +* shim and grub, or more directly +* linux kernel + +While the primary target OS is linux, other OSs like BSD variants are also confirmed to work with UEFI U-Boot. Furthermore, *UEFI SCT (Self Certification Tests)* can also be executed directly on U-Boot. This allows us to evaluate to what extent the current implementation is compliant with the UEFI specification and has contributed to enhancing conformance.
UEFI Secure Boot is based on message digests (hashes) and public key cryptography technologies. When attempting to load an image file, U-Boot checks for the image's signature against signature databases to determine if the image is trusted or not. + +There are four main signature databases used here. + +* PK (Platform Key) +* KEK (Key Enrollment Key) +* db (allow-list of signatures) +* dbx (deny-list of signatures) + +"db" database may have x509 certificates, hashes of images as signatures and "dbx" may additionally contain hashes of certificates. + +An image will be granted for loading if + +* it is signed and its signature is validated by one of the certificates in "db" (there can be a number of intermediate certificates involved) or +* its message digest is found in "db" + +Likewise, any image will be refused if + +* it is signed and its signature is validated by one of the certificates in "dbx" +* it is signed and verified by "db" but any one of certificates in a chain of trust is found in "dbx" or +* its message digest is found in "dbx" + +In July, the security vulnerability, named "BootHole"\[5], has drawn people's attention. Grub, the de-facto boot loader for linux and other distributions, has a security attack vector due to memory overflow and may possibly allow attackers to execute arbitrary code bypassing UEFI Secure Boot on targeted systems. + +To eliminate this security hole, grub and hence shim must be updated, and at the same time, the chain of trusted boot sequence must also be modified to prevent any old and vulnerable version of software from being loaded and potentially exploited by malicious code. It is expected that, in future security fixes, hashes of all affected binaries will be added to "dbx". (Additionally, shim will maintain its own signature database, MokList/MokListX (Machine Owner's Keys), per OS requests as well.) + +All those signature databases above are kept and maintained as UEFI authenticated variables, which means that they are also protected with their own signatures and that updating their values must be granted by verifying the signatures. PK is used to verify KEK before altering its value, while KEK is a key for updating db and dbx. + +Once PK is enrolled, UEFI Secure Boot is set to be in force. Since PK is the root of chain in trusted boot sequence, it is expected to be stored in a non-volatile and tamper-resilient place on the systems at the factory level. + +The current UEFI U-Boot provides two alternatives for non-volatile variable storage: + +a) a plain file on UEFI System Partition + +b) OP-TEE based variable service + +While a filesystem in (a) doesn't provide any robust protection against being compromised, the secure service running under OP-TEE in (b), EDK-II Standalone Management Mode, is isolated, yet being proxied and accessible from non-secure U-Boot code. Thereby, the option (b) is the only fully secure solution for now. Required patches have been merged for U-Boot and OP-TEE, but some on EDK-II side are still pending. + +Currently, U-Boot has no switch to turn UEFI Secure Boot on and off after enrolling PK. + +## Playing with Secure Boot + +Let's take Red Hat Enterprise Linux (8.2) as a real example and illustrate how we can activate UEFI Secure Boot and install the OS with U-Boot on qemu(arm64). (Please note that this is for demo purposes only, aiming to help people have an easy experience with UEFI Secure Boot, not intended to show that UEFI U-Boot fully meets RHEL's requirements). 
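+
+Before walking through the installation steps, here is a minimal, self-contained sketch of the allow/deny decision described above. This is illustrative C written for this article, not U-Boot code; it collapses the certificate chain to a single "signer" and reduces certificates and digests to plain integers so the logic stands on its own.
+
+```
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+
+struct sigdb {
+    const int *certs;  size_t ncerts;   /* trusted or banned certificates */
+    const int *hashes; size_t nhashes;  /* allowed or denied image digests */
+};
+
+static bool contains(const int *list, size_t n, int v)
+{
+    for (size_t i = 0; i < n; i++)
+        if (list[i] == v)
+            return true;
+    return false;
+}
+
+/* digest: hash of the image; signer: the certificate that validates the
+   image's signature, or -1 if the image is unsigned. */
+static bool image_allowed(const struct sigdb *db, const struct sigdb *dbx,
+                          int digest, int signer)
+{
+    /* Deny-list checks first: any dbx match refuses the image. */
+    if (contains(dbx->hashes, dbx->nhashes, digest))
+        return false;
+    if (signer >= 0 && contains(dbx->certs, dbx->ncerts, signer))
+        return false;
+
+    /* Allow-list checks: a db match grants loading. */
+    if (contains(db->hashes, db->nhashes, digest))
+        return true;
+    if (signer >= 0 && contains(db->certs, db->ncerts, signer))
+        return true;
+
+    return false;
+}
+
+int main(void)
+{
+    const int db_certs[] = { 100 };   /* e.g. the OS vendor's CA */
+    const int dbx_hashes[] = { 42 };  /* e.g. a revoked boot loader build */
+    const struct sigdb db  = { db_certs, 1, NULL, 0 };
+    const struct sigdb dbx = { NULL, 0, dbx_hashes, 1 };
+
+    printf("%d\n", image_allowed(&db, &dbx, 7, 100));  /* signed by CA: allowed */
+    printf("%d\n", image_allowed(&db, &dbx, 42, 100)); /* digest in dbx: refused */
+    return 0;
+}
+```
+
+In the real implementation the "certificates" are x509 certificates verified through a chain of trust and the "digests" are message digests of the PE binaries, but the ordering is the same: the deny-list always takes precedence over the allow-list.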
+ +To simplify the required steps, we will go with option (A) in this example. + +### 1. build U-Boot + +CONFIG\_EFI\_SECURE\_BOOT is the only option required in addition to qemu\_arm64\_defconfig to support UEFI Secure Boot. + +``` +$ make qemu_arm64_defconfig +# enable CONFIG_EFI_SECURE_BOOT and CONFIG_SEMIHOSTING +$ make +``` + +### 2. prepare a disk with UEFI System Partition + +Filesystem-based variables service relies on UEFI System Partition to implement non-volatile variables by saving values in a file on the partition. + +``` +$ qemu-img create -f raw redhat_fs.img 5G +$ sgdisk -n 1:0:+100MiB -t 1:C12A7328-F81F-11D2-BA4B-00A0C93EC93B redhat_fs.img +$ guestfish -a redhat_fs.img + > run + > mkfs vfat /dev/sda + > quit +``` + +### 3. acquire Red Hat certificate + +This step is a bit tricky as, AFAIK, there is no website available from which a valid Red Hat certificate can be downloaded. + +Luckily any EFI application may hold associated certificates in its signature (with pkcs7 format), and 'shim.efi', which is to be loaded as the first EFI application, which has been signed with "Red Hat Secure Boot (CA key 1)". You will have to dig into the signature's data structure and retrieve this certificate into a separate file. + +Details are not described here, but you can use "sbverify" command to extract signature data (or authenticode) from the binary and then use "openssl" command to examine and parse it to identify the offset and size of the certificate within it. + +### 4. create data for signature database + +Here, interim "PK" and "KEK" are created as self-signed certificates, while "db" should contain the certificate, dubbed "rh\_ca.crt", from step (3). + +``` +# PK +$ openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_PK/ \ + -keyout PK.key -out PK.crt -nodes -days 365 +$ cert-to-efi-sig-list -g 11111111-2222-3333-4444-123456789abc \ + PK.crt PK.esl +$ sign-efi-sig-list -c PK.crt -k PK.key PK PK.esl PK.auth + +# KEK +$ openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_KEK/ \ + -keyout KEK.key -out KEK.crt -nodes -days 365 +$ cert-to-efi-sig-list -g 11111111-2222-3333-4444-123456789abc \ + KEK.crt KEK.esl +$ sign-efi-sig-list -c PK.crt -k PK.key KEK KEK.esl KEK.auth + +# db +$ cert-to-efi-sig-list -g 11111111-2222-3333-4444-123456789abc \ + rh_ca.crt rh_ca.esl +$ sign-efi-sig-list -c KEK.crt -k KEK.key db rh_ca.esl rh_ca.auth +``` + +### 5. start U-Boot & enroll keys into signature database + +Now you can enroll keys at U-Boot command line. First, start qemu with the following command: + +``` +$ qemu-system-aarch64 \ + -machine virt \ + -cpu cortex-a57 -smp 1 -m 4G -d unimp \ + -nographic -serial mon:stdio \ + -semihosting \ + -bios /path/to/u-boot.bin \ + -drive if=none,file=/path/to/redhat_fs.img,format=raw,id=hd0 \ + -device virtio-blk-device,drive=hd0 \ + -cdrom /path/to/rhel-8.2-aarch64-dvd.iso +``` + +Then, + +``` +=> smhload PK.auth 50000000 +=> setenv -e -nv -bs -rt -at -i 50000000: PK +=> smhload KEK.auth 50000000 +=> setenv -e -nv -bs -rt -at -i 50000000: KEK +=> smhload rh_ca.auth 50000000 +=> setenv -e -nv -bs -rt -at -i 50000000: db +``` + +### 6. install the OS + +Start the OS installer: + +``` +=> fatload virtio 1:1 50000000 EFI/BOOT/BOOTAA64.EFI +=> bootefi 50000000 +``` + +Select "Install Red Hat Enterprise Linux 8.2". Eventually, the installer boots up in text mode and stops at the installation menu. + +``` +... 
+05:32:49 Not asking for VNC because we don't have a network +05:32:50 X startup failed, falling back to text mode +================================================================================ +================================================================================ +Installation + +1) [x] Language settings 2) [x] Time settings + (English (United States)) (America/New_York timezone) +3) [!] Installation source 4) [!] Software selection + (Processing...) (Processing...) +5) [!] Installation Destination 6) [x] Kdump + (No disks selected) (Kdump is enabled) +7) [!] Network configuration 8) [!] Root password + (Not connected) (Password is not set.) +9) [!] User creation + (No user will be created) + +Please make a selection from the above ['b' to begin installation, 'q' to quit, +'r' to refresh]: +``` + +### Installation + +You can configure the options as you like. The point here is on "5) Installation Destination". Select the disk created in step (2) as the destination and + +``` +Partitioning Options + +1) [X] Replace Existing Linux system(s) +2) [ ] Use All Space +3) [ ] Use Free Space +4) [ ] Manually assign mount points + +Installation requires partitioning of your hard drive. Select what space to use +for the install target or manually assign mount points. + +Please make a selection from the above ['c' to continue, 'q' to quit, 'r' to +refresh]: +``` + +Select "Replace Existing Linux system(s)" as we have already created UEFI System Partition in step (2). + +After setting all the choices, continue the installation. + +At the end of installation, you will probably see an error like: + +``` +Question + +The following error occurred while installing the boot loader. The system will +not be bootable. Would you like to ignore this and continue with installation? + +failed to set new efi boot target. This is most likely a kernel or firmware bug. +Please respond 'yes' or 'no': +``` + +You can ignore this message and say 'yes'. What happened here was that the installer failed to set up UEFI variables relating to boot options, ie. "BootXXXX" and "BootOrder" as UEFI variables are not accessible from OS in runtime services. + +### 7. reboot the system + +Once you have successfully done the above steps, you will see in dmesg from efistub code and kernel: + +``` +EFI stub: Booting Linux Kernel... +EFI stub: EFI_RNG_PROTOCOL unavailable, no randomness supplied +EFI stub: UEFI Secure Boot is enabled. +EFI stub: Using DTB from configuration table +EFI stub: Exiting boot services and installing virtual address map... +[ 0.000000] Booting Linux on physical CPU 0x0000000000 [0x411fd070] +[ 0.000000] Linux version 4.18.0-193.el8.aarch64 (mockbuild@arm64-025.build.eng.bos.redhat.com) (gcc version 8.3.1 20191121 (Red Hat 8.3.1-5) (GCC)) #1 SMP Fri Mar 27 15:23:34 UTC 2020 +[ 0.000000] Machine model: linux,dummy-virt +[ 0.000000] efi: Getting EFI parameters from FDT: +[ 0.000000] efi: EFI v2.80 by Das U-Boot +... +``` + +Here is the last magic. Even though no boot option variables were created in step (6), UEFI U-Boot is set to look for a fallback bootable image, "/EFI/BOOT/BOOTAA64.efi," in UEFI System Partition and attempt to start it automatically. + +This binary is actually a copy of OS's boot loader, i.e. shimaa64.efi if UEFI Secure Boot is enabled, and it will also detect an absence of boot options and create them with the OS standard path and start OS's second boot loader, 'grubaa64.efi', which is signed by OS vendor and must be verified before loading. 
Likewise, it will securely chain the boot sequence to linux kernel. + +Hereafter, U-Boot's efi bootmanager is expected to kick off shim from installed path at every succeeding reboot under secure boot environment. + +### References + +**\[1][https://uefi.org/](https://uefi.org/)** + +**\[2][https://github.com/ARMsoftware/ebbr/](https://github.com/ARM-software/ebbr/)** + +**3][https://access.redhat.com/articles/5254641](https://access.redhat.com/articles/5254641)** + +**\[4][https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/oem-secure-boot](https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/oem-secure-boot)** + +**\[5][https://eclypsium.com/2020/07/29/theres-a-hole-in-the-boot/](https://eclypsium.com/2020/07/29/theres-a-hole-in-the-boot/)** + +For more information on Linaro and the work we do, make sure to [get intouch](https://www.linaro.org/contact/)! diff --git a/src/content/blogs/ensuring-optimal-performance-through-enhanced-kernel-testing.mdx b/src/content/blogs/ensuring-optimal-performance-through-enhanced-kernel-testing.mdx new file mode 100644 index 0000000..daca678 --- /dev/null +++ b/src/content/blogs/ensuring-optimal-performance-through-enhanced-kernel-testing.mdx @@ -0,0 +1,78 @@ +--- +title: Enhanced Kernel Testing for Optimal Performance +description: In this blog, Linaro Interns Mirco Romagnoli and Federico Gelmetti + talk about ensuring optimal performance through enhanced kernel testing. Read + more here. +date: 2021-06-22T01:15:39.000Z +image: linaro-website/images/blog/code +tags: + - linux-kernel + - testing + - toolchain +related_projects: + - LKQ +author: linaro +related: [] + +--- + +## Introduction + +As part of Linaro’s mission to improve the Arm architecture ecosystem, Linaro created LKFT - Linaro’s Kernel Functional Test framework. The mission of Linaro’s Kernel Functional Test Framework is to improve the quality of the Linux kernel by performing functional testing on Arm hardware. + +While functional testing plays a critical role in ensuring the quality of the kernel, it does not cover another aspect of paramount importance: performance. Linaro therefore decided to extend LKFT to also perform performance analysis, and, in particular automatic detection and reporting of performance regressions. We started this effort a few years ago and today we have contributed to the mmtests benchmark suite. This allows us to run mmtests benchmarks in LAVA (Linaro Automation and Validation Architecture), publish the result to squad (Software Quality Dashboard) and then do post processing to find out if we have regressed between the different kernel versions. + +In this blog, Linaro Interns Mirco Romagnoli and Federico Gelmetti talk about the functionality they have enabled to support performance testing. + +## Adding MMTests support to LAVA + +by Mirco Romagnoli + +Thanks to a recent series of patches, LAVA now supports the execution of tests from the MMTests benchmark suite. + +MMTests is a well-proven configurable test suite that runs performance tests against arbitrary workloads and allows you to compare the results of these tests to detect regressions or improvements of different kernel revisions. Up until now, the comparison script required the raw data of the different runs to be on the filesystem, no other loading method was supported. Every time the test boards were used by LAVA, the memory would be wiped, resulting in the raw data used for comparisons being lost at each execution. 
+ +With this patch however, it is now possible to save results and retrieve them to make comparisons. The results of these tests can be exported to JSON, this file can then be used to make the comparisons without the need to have the raw metrics on the filesystem. This allows us to run a MMTests' test, export the results to JSON and then save the results as metrics on LAVA. By giving a specific name to each metric the JSON file can eventually be rebuilt and used with the compare script. + +To find out more, click on the links below to see what has been done: + +**MMTests patchset** +This is the patchset that enables the JSON export and load of the test results. + +[https://github.com/gormanm/mmtests/commit/a4e7a9e19eadb8e75f2be3321ba8cef119becd33](https://github.com/gormanm/mmtests/commit/a4e7a9e19eadb8e75f2be3321ba8cef119becd33) + +[https://github.com/gormanm/mmtests/commit/27d5d2f1dde49f8d2a782893d6e06ac9f1897340](https://github.com/gormanm/mmtests/commit/27d5d2f1dde49f8d2a782893d6e06ac9f1897340) + +[https://github.com/gormanm/mmtests/commit/c964294e9b11a934a59c5ed4df4768a0d79a94a9](https://github.com/gormanm/mmtests/commit/c964294e9b11a934a59c5ed4df4768a0d79a94a9) + +[https://github.com/gormanm/mmtests/commit/95a39b0750c16da8aadff617ca46011828b96513](https://github.com/gormanm/mmtests/commit/95a39b0750c16da8aadff617ca46011828b96513) + +[](https://github.com/gormanm/mmtests/commit/95a39b0750c16da8aadff617ca46011828b96513)**Test definition for LAVA** +Here you can see the workflow for LAVA and the python script that formats the metrics that will be sent to LAVA. + +[https://github.com/Linaro/test-definitions/commit/de4c57c2b8d3d877001b898a601b7753d23d2cfc](https://github.com/Linaro/test-definitions/commit/de4c57c2b8d3d877001b898a601b7753d23d2cfc)[](https://github.com/Linaro/test-definitions/commit/de4c57c2b8d3d877001b898a601b7753d23d2cfc) + +## **Adding a new testcase to lkp-tests** + +by Federico Gelmetti + +[lkp-tests](http://lwn.net/Articles/555968/) \[1] is a framework aimed at testing various parts of a kernel, to track its performance and robustness. +It runs a large set of benchmarks which cover core components of the Linux kernel: virtual memory management, I/O subsystem, process scheduler, file system, network, device drivers, and more. The interest towards lkp-tests was born with the intent of adding the test cases already configured in the suite into Linaro’s [test-definitions](https://github.com/Linaro/test-definitions) \[2] suite. + +After a bit of digging, we came to the conclusion that the best route would be to call lkp-tests from test-definitions directly, to avoid exporting all the individual test cases from lkp-tests to test-definitions. +The first step in this plan is to better understand how lkp-tests works, to see if it can be integrated into test-definitions. The patch I created does exactly that. + +The commit message describes in detail all the steps and components required to add a new test case from the ground up in lkp-tests, in order to provide useful basic information for the subsequent phases of the integration. +The patch is by no means a full implementation of a new test for lkp-tests, but rather a writeup with a practical example of how the framework works and what it needs to operate correctly, that can be referred to later in the development to speed up the integration project. 
+
+**Resources:**
+
+\[1] [http://lwn.net/Articles/555968/](http://lwn.net/Articles/555968/)
+
+\[2] [https://github.com/Linaro/test-definitions](https://github.com/Linaro/test-definitions)
+
+## Conclusion
+
+The contributions highlighted in this post are the first steps towards a complete framework for automatic performance analysis. Linaro is already working on the next practical steps: adding a general template for automatic email reports on performance and regressions, adding the mmtests suite to LKFT's root filesystem, and running multiple benchmarks from mmtests. The next important step will be to add these benchmarks to LKFT's daily runs and report back to the community.
+
+For more information on Linaro and the work we do, reach out to us through our [contact page](https://www.linaro.org/contact/).
diff --git a/src/content/blogs/force-idle-when-a-cpu-is-overheating.mdx b/src/content/blogs/force-idle-when-a-cpu-is-overheating.mdx
new file mode 100644
index 0000000..8b54aeb
--- /dev/null
+++ b/src/content/blogs/force-idle-when-a-cpu-is-overheating.mdx
@@ -0,0 +1,134 @@
+---
+title: Force Idle When a CPU Is Overheating
+description: In this article, Daniel Lezcano focuses on a new technique to cool
+  down the CPUs to help avoid overheating. Click here to read about this new
+  technique!
+date: 2020-09-18T11:22:11.000Z
+image: linaro-website/images/blog/chip_background_under_2mb
+tags:
+  - arm
+  - linux-kernel
+  - iot-embedded
+related_projects:
+  - PERF
+author: daniel-lezcano
+related: []
+
+---
+
+## About the Kernel Working Group
+
+The Kernel Working Group’s (KWG) primary focus is to be an active contributor to the upstream community and facilitate acceptance of our code into the Linux mainline kernel. Our goal is kernel consolidation - a single source tree with integrated support for multiple Arm SoCs and Arm-based platforms.
+
+## Introduction
+
+Today’s CPUs are more and more powerful. More powerful in terms of compute capacity, but also in terms of heat creation.
+
+In the embedded world, especially in the Arm ecosystem for mobile platforms, the Linux kernel has to cope with the high temperatures created by processor-intensive tasks that can lead a CPU to overheat. The thermal framework is the Linux kernel subsystem in charge of handling these use cases.
+
+The thermal framework and its components were briefly presented in a previous [blog](https://www.linaro.org/blog/thermal-notifications-with-netlink/). One of the components leveraged to provide improved performance in processor-intensive tasks is a passive cooling device, a software component, based on performance state changes. That implies the hardware is [DVFS](https://en.wikipedia.org/wiki/Power_management#DVFS) capable and has a cpufreq driver built on top of it. But what if the system does not have such a feature? Is there an alternative to passively cool down the CPU?
+
+### DVFS cooling consideration
+
+Dynamic Voltage and Frequency Scaling is a hardware power management technique that adapts the performance of a CPU to its usage. The performance states are discrete values called “Operating Performance Points” or OPPs. An OPP is a tuple of \<frequency, voltage>, and platforms usually provide fewer than ten states because stability must be validated for each OPP on each device. Validation is time-consuming and can become costly.
+
+Additional power savings are achieved by undervolting the CPU. The frequency selected in this case is the maximum speed at the given voltage.
Frequency scaling without dynamic voltage does not bring any power savings because in the end the cost in energy is the same: only the duration changes for the same amount of operations. + +With DVFS, a kernel governor is in charge of changing the states. Today the schedutil governor prevails because it is directly tied to the [“per entity load tracking” (PELT)](https://lwn.net/Articles/531853/) signal, a polynomial decay on the CPU usage. + +The transition states are very fast (less than 1ms) so the scheduler can accommodate the performance state changes in the PELT signal update. + +Each performance states power consumption is based on the formula: + +![Power = C x freq x V squared](/linaro-website/images/blog/formula) + +The symbol C is the capacitance, a constant depending on the technology, freq is the frequency in Hertz and V is the voltage. To ensure the stability of the system, when the frequency is increased the voltage must also increase, and since voltage is squared, the resulting power consumption is quadratic as shown in the figure below for an ARM64 dev board: + +![Power Consumption chart](/linaro-website/images/blog/power-consumption) + +The first plot shows that power consumption is exponentially increasing with frequency, while the second plot in the first row shows that the compute capacity is linear. The ratio of the power vs the compute capacity is shown in the third plot where we can see that the efficiency is getting worse with the higher frequencies. The fourth plot shows the temperature behavior regarding a run of dhrystone at each OPP, and the highest OPP triggers the mitigation resulting in a longer compute duration because thousands of transitions per seconds reduce the compute capacity. + +As a consequence, the heating effect is the highest when the last OPP is used and, as stated above, that is when the CPU is 100% in use for a long period of time. + +Mitigation begins when the thermal framework reduces the OPP while the cpufreq governor requests for a higher OPP. These two decisions are aggregated through the frequency Quality of Service (QoS) and the thermal framework always wins. + +The cooling effect is immediate when the OPP is reduced from the highest to the next highest level and as the transitions are very fast, that supports the use of an efficient passive cooling device. + +By showing two Hisilicon development boards with very different thermal profiles, the next figure illustrates how the mitigation controls the temperature to keep it close to the 75°C threshold. The ‘dhrystone’ benchmark was used to make the boards warm and then ended its execution close to the 40th second for the hi3660 and to the 55th for the hi6220. Please note that the profile is based on the development boards and does not reflect the end-user packaging with the form factor which can behave very differently. + +![development board thermal profile graph](/linaro-website/images/blog/development-board-thermal-profile) + +The noticeable behavior is the sawtooth aspect of the temperature curves when the mitigation is activated, confirming the immediate impact of the OPP change on the heat. + +It is important to understand how the mitigation has an impact on the performance, so if the cooling effect is high when reducing one OPP, additional power capacity is attained for the next quantum of time to run at a higher OPP. 
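+
+To make the quadratic relationship described above concrete, here is a small back-of-the-envelope sketch; the two OPPs are invented for illustration and do not correspond to any particular board:
+
+```c
+#include <stdio.h>
+
+/* Relative dynamic power: P = C x f x V^2. The capacitance C is a constant
+ * of the silicon, so it cancels out when comparing two OPPs of the same CPU. */
+static double rel_power(double freq_hz, double volts)
+{
+    return freq_hz * volts * volts;
+}
+
+int main(void)
+{
+    /* Two made-up OPPs: 1.0 GHz @ 0.8 V and 2.0 GHz @ 1.1 V. */
+    double low  = rel_power(1.0e9, 0.8);
+    double high = rel_power(2.0e9, 1.1);
+
+    /* Compute capacity grows linearly with frequency, power does not. */
+    printf("capacity ratio: %.1fx\n", 2.0e9 / 1.0e9);   /* 2.0x  */
+    printf("power ratio:    %.1fx\n", high / low);      /* ~3.8x */
+    return 0;
+}
+```
+
+Dropping from the highest OPP therefore costs relatively little compute capacity while saving a disproportionate amount of power, which is why it is such an effective first step for passive cooling.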
+ +Unfortunately some platforms do not have DVFS, or the voltage domain can be shared across different devices, so if one of these devices is in use it will prevent the DVFS to undervolt the performance domain. Thus the cooling effect won’t be as efficient as described above and in the end the mitigation will fail. This leads to a hard system reboot, instability or crash; in other words a non functioning device. + +### Power down the CPU when it does nothing + +If the DVFS technique allows power saving dynamically when the CPU is in use, what happens when the CPU is idle? + +At boot time, the system spawns per CPU permanent tasks at the lowest priority with a dedicated infinite loop entering and exiting a CPU idle routine. As the task is the last one to be executed due to its low priority, it will result in the CPU going to an idle routine to save energy. + +During this pause, the CPU can cool down. + +When an interrupt occurs on the CPU, this CPU exits the idle routine and schedules itself out. The wakeup process resulting from the interrupt is handled by the hardware. + +There can be different idle routines classified by their power consumption: + +* Clock gating: the CPU voltage is untouched but the clock is stopped, that is the fastest idle routine to sleep and wake up, requiring less than 1us for each. But this routine has the highest power consumption. That is what the WFI instruction does on the ARM systems +* CPU retention: the clock is stopped while the voltage is the minimum viable voltage to keep the CPU logic consistent. The power saving is better than the Clock Gating scenario above, but the wakeup is a bit longer. This idle routine does not work as effectively with recent boards having a lot of cores because the CPU is woken up by the cache coherency hardware which decreases the power saving +* CPU power down: the clock is stopped, the voltage is zero, the cache is flushed, it is out of cache coherency and the context is saved. It takes a longer time to enter this state and wake up, the kernel literally boots the CPU, but the power saving is at its maximum as the consumption is close to zero +* Power domain down: if all the CPUs belonging to the same power domain are powered down, then the rest of the logic used by those CPUs can be shut down as well. Even more power is saved on the system at the cost of a higher latency for the wakeup. + +The idle routine selection is determined by an idle governor which does some statistics on the previous events to try to predict when the next wakeup will occur and choose the most convenient idle state. + +The idle task is the last one to be run and it will just enter an idle routine, but there is an exception where the CPU is forced to enter this idle routine. The [suspend-to-idle](https://www.linaro.org/blog/suspend-to-idle/) is a feature allowing the framework to put the entire system into a retention state providing a high power saving but, compared to the hibernate, taking much less time to wake up. + +In order to put the CPU in a power down state, a specific routine has been implemented to force the current CPU to enter an idle state. + +### Idle injection to cool down the CPUs + +On embedded systems, passive cooling is mandatory and it’s critical to provide a way to cool down the CPUs if the DVFS is not available. + +We described the DVFS cooling device and the idle routines, now we have enough material to understand idle injection. 
+
+A new technique to cool down the CPUs has been introduced, resulting in an idle cooling device. The principle is to inject a constant idle period at runtime in order to create duty cycles. This allows power to be reduced linearly.
+
+The duty cycle is based on the period forced by the fixed idle duration. That means that, for a 10ms idle duration, a 50% duty cycle results in 10ms of runtime and a 33% duty cycle results in 20ms of runtime.
+
+Assuming the idle transitions are free, a CPU consuming 1000mW at a 50% duty cycle would consume half of the power, 500mW. The reality is a bit different, as the idle transitions have a cost in terms of energy: powering down the CPU requires energy. Thus a power consumption of around 650mW for a 50% duty cycle is closer to what actually happens. Strictly speaking, the idle injection cooling device may be considered an active cooling device rather than a passive one, but that is a subject of debate which is out of the scope of this article.
+
+Please note that the duty cycle logic is inverted in our case, as it is based on the idle duration, not the runtime.
+
+![Duty Cycle chart](/linaro-website/images/blog/duty-cycle)
+
+The advantage of a duty cycle approach is that we have clear boundaries for the cooling device's minimal and maximal states. Obviously, the minimal state is zero, meaning there is no mitigation, and the maximal state is 100, which means the CPU is always idle. Semantically this makes sense: the mitigation increases the cooling effect by incrementing the state, and reaching 100 means the system is doing its best to cool down the CPU. In practice, we never observed a value above 55, even in the worst-case scenario.
+
+The following figure shows a capture of the kernel traces over a short time frame:
+
+![Kernel Traces graph](/linaro-website/images/blog/kernel-traces)
+
+Even if the CPUs are independently managed by the idle injection, in this case the thermal framework grouped them under the same cooling device, so they go idle synchronously, increasing the chances of a power domain shutdown.
+
+The idle injection framework allows a latency constraint: any idle routine with a wakeup latency greater than this constraint is ignored, which is a way to prevent the use of an inefficient idle routine.
+
+The following graphs show a comparison between the DVFS and the idle injection techniques as cooling devices, with dhrystone workloads running in 10-second bursts. The first row shows the temperature sampled every half second, the second row shows the cooling device state. For DVFS, the state represents how many steps we decrease from the highest OPP, while the idle injection states show the duty cycle percentage.
+
+![comparison between the DVFS and the idle injection charts](/linaro-website/images/blog/dvfs-idle-injection-comparision)
+
+The cooling states on the left of the figures represent the index values of the different steps the cooling device, a software component, is using for the cooling effect. For example, an on/off fan has one state while a CPU with 7 OPPs has 7 states. The number of cooling states is an arbitrary value used to choose the level of the cooling effect and depends on the implementation.
+
+We can observe that the shapes of the figures are similar, with a more stable temperature resulting from the idle injection, thus proving the mitigation is effective. However, the latency introduced by the idle injection will impact performance much more than the OPP changes would have.
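+
+To recap the duty-cycle arithmetic used above in a compact form, here is a small sketch; only the idealised numbers are computed, since the ~650mW figure quoted earlier is a measurement:
+
+```c
+#include <stdio.h>
+
+/* The duty cycle is expressed as the idle share of the period, as in the
+ * article, so: runtime = idle_duration * (100 - duty) / duty. */
+static double runtime_ms(double idle_ms, double duty_pct)
+{
+    return idle_ms * (100.0 - duty_pct) / duty_pct;
+}
+
+int main(void)
+{
+    printf("50%% duty -> %.0f ms of runtime per 10 ms idle\n", runtime_ms(10.0, 50.0));
+    printf("33%% duty -> %.0f ms of runtime per 10 ms idle\n", runtime_ms(10.0, 33.3));
+
+    /* Idealised power for a 1000 mW CPU at a 50% duty cycle, ignoring the
+     * energy cost of the idle transitions themselves. */
+    printf("ideal power at 50%% duty: %.0f mW\n", 1000.0 * (100.0 - 50.0) / 100.0);
+    return 0;
+}
+```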
+
+### Conclusion
+
+The DVFS-based cooling device technique is accurate and efficient. Unfortunately, it is not available on all platforms, or it can fail if the voltage domain is shared with other devices.
+
+The idle injection-based cooling device is a bit more accurate than the DVFS technique, but at the cost of introducing additional latency.
+
+If the platform is using a shared voltage domain, the idle injection and the DVFS cooling devices can be used together. When the voltage domain can be changed, the DVFS cooling device will act as expected, but when the voltage domain is locked, the cooling will fail and the temperature will continue to increase. In this case another threshold can be connected to the idle injection cooling device, which will then act as a backup.
+
+The thermal framework now has a new software-based cooling device performing idle injection for Arm systems. Embedded devices, mobile and automotive, can rely on it to build the most convenient setup for their system.
+
+For more information on the work Linaro does, get in touch through [our contact page](https://www.linaro.org/contact/).
diff --git a/src/content/blogs/glibc-improvements-and-what-to-expect-in-future-linux-distributions.mdx b/src/content/blogs/glibc-improvements-and-what-to-expect-in-future-linux-distributions.mdx
new file mode 100644
index 0000000..6ae20b8
--- /dev/null
+++ b/src/content/blogs/glibc-improvements-and-what-to-expect-in-future-linux-distributions.mdx
@@ -0,0 +1,188 @@
+---
+title: "GLIBC improvements & what to expect in future Linux distributions "
+description: "In this article, we cover glibc improvements in an
+  architecture-specific way while also highlighting generic features to show
+  what to expect in future Linux distributions. "
+date: 2021-07-15T08:31:35.000Z
+image: linaro-website/images/blog/code
+tags: []
+author: adhemerval-zanella
+related: []
+
+---
+
+The GNU C Library Project (glibc) provides the core libraries for the GNU system and GNU/Linux systems. Although it is quite an old project (the first release was in 1987), it is actively developed and maintained. Its latest version, 2.35, was released on 2022-02-03 with a set of improvements, optimizations, new features, and the usual bug fixes. Linaro and Arm worked on AArch64 enablement and optimizations for recent platform additions besides the generic improvements.
+
+This blog will cover the recent glibc improvements in an architecture-specific way (with a special emphasis on AArch64) while also highlighting generic features to show developers and system administrators what to expect in future Linux distributions.
+
+The recent glibc releases bring a lot of new features, ranging from new schemes to handle optimized shared libraries, to new architecture support, new security features, and hardware extensions. Here are some features from the latest releases, from 2.29 to 2.35:
+
+## AArch64 PAC-RET, BTI, and MTE
+
+**glibc 2.32** supports newer Arm Architecture features: AArch64 pointer authentication for return addresses ([PAC-RET](https://events.static.linuxfound.org/sites/events/files/slides/slides_23.pdf)) and branch target identification ([Armv8.5-A BTI](https://github.com/llvm-mirror/llvm/commit/4bc81028d48c0ab07e7b429d2a98ed6d15140a23)), while glibc **2.33** supports the Arm Memory Tagging Extension ([Armv8.5-A MTE](https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/Arm_Memory_Tagging_Extension_Whitepaper.pdf)).
+
+**PAC-RET** is an optional Armv8.3-A extension that detects illicit modification of pointers and data. Its main purpose is to harden programs against Return-Oriented Programming (ROP) by signing and authenticating pointers when they are used, such as locally-scoped pointers on the stack frame or PLT ([Procedure Linkage Table](https://www.technovelty.org/linux/plt-and-got-the-key-to-code-sharing-and-dynamic-libraries.html), an essential part of dynamic libraries) entries. Support comes from a combination of the kernel (which handles the internal cryptographic keys) and the compiler. glibc support was also added and requires some additional handling in specific assembly routines.
+
+**BTI** (Branch Target Identification) is another optional Armv8.5-A security extension which marks valid targets of indirect branches, so the CPU can act (with a trap, for instance) when an instruction in a protected page tries to perform an indirect branch to an instruction other than a marked BTI. It requires glibc support since it is an opt-in feature set per ELF module via a GNU property note that the dynamic linker has to check so it can mprotect the executable pages with PROT\_BTI (the GNU property is an ELF extension added either by the linker/compiler or explicitly in the source with inline assembly). The idea is to only allow branches to code mapped with the PROT\_BTI mmap flag; branches to code mapped on a page without the flag are handled as invalid operations.
+
+Finally, **MTE** (Memory Tagging Extension) is another Armv8.5-A security extension that implements granular memory access checking through lock-and-key access to memory. The system checks the key on each memory access and, if the key does not match the lock, an error is reported. This is done by using the Top Byte Ignore (TBI) feature of the Armv8-A architecture, where each memory pointer is ‘colored’ with a different tag.
+
+The extension requires both kernel and library support, and glibc implements it in its memory allocation routines (malloc and related functions). Although disabled by default, it can be enabled per process with a glibc tunable. MTE is used by the glibc malloc implementation to ‘color’ each block of memory (on a 16-byte granule, using the extra bits in the unused part of the VMA) as well as the pointer returned by the memory allocation routines. When the pointer is dereferenced, the pointer's color is checked against the memory's color and, if they differ, the access is faulted.
+
+This improves detection of buffer overruns (since glibc metadata is colored differently from the memory handed to the caller), simple use-after-free (since free will re-color the memory to a different value and freeing it again will mismatch the color when accessing the metadata), and also some double-free scenarios.
+
+## More Arm optimizations
+
+The glibc project also keeps adding new optimizations to the AArch64 memory and string routines, which are the backbone of many algorithms and among the most used routines in most programs. The chart below lists the improvements over the releases:
+
+![More Arm Optimizations](/linaro-website/images/blog/more-arm-optimizations-)
+
+The best routine selection is done with a mechanism named [GNU IFUNC](https://sourceware.org/glibc/wiki/GNU_IFUNC): in a simplified manner, glibc consults kernel-provided information along with processor identification to bind the most suitable memory and string routine at runtime (so there is no need to provide specialized builds for each chip or vendor).
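+
+To illustrate the mechanism, here is a small, self-contained sketch of an ifunc resolver. The function names are made up for illustration; glibc's real resolvers test specific hwcap bits and CPU identification rather than this simplified check:
+
+```c
+/* Minimal GNU ifunc sketch (GCC on Linux/ELF): the resolver runs once at
+ * load time and returns the implementation to bind. Names are illustrative. */
+#include <stdio.h>
+#include <sys/auxv.h>   /* getauxval, AT_HWCAP */
+
+static void impl_generic(void) { puts("generic implementation"); }
+static void impl_simd(void)    { puts("SIMD implementation");    }
+
+/* The resolver returns a pointer to the chosen implementation. */
+static void (*resolve_impl(void))(void)
+{
+    unsigned long hwcap = getauxval(AT_HWCAP);
+    /* A real resolver would test specific feature bits (e.g. SVE or MTE). */
+    return hwcap ? impl_simd : impl_generic;
+}
+
+/* Every call to my_routine() is bound to whatever the resolver picked. */
+void my_routine(void) __attribute__((ifunc("resolve_impl")));
+
+int main(void)
+{
+    my_routine();
+    return 0;
+}
+```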
+
+glibc keeps tuning and adjusting this selection for each processor; recent versions, for instance, added optimized SIMD routines for Neoverse chips. As a result, the best memory operation routines are provided without the user having to program for them or instruct the library to use them.
+
+## 64-bit time support
+
+Recent Linux versions (starting from version 5.1, with complete support in 5.4) added support for 64-bit time on 32-bit legacy architectures (such as 32-bit Arm). **glibc 2.34** also adds support for building programs with 64-bit time support on such architectures. This is enabled with the \_TIME\_BITS preprocessor macro set to 64 and is only supported when LFS (\_FILE\_OFFSET\_BITS=64) is also enabled.
+
+## C.UTF-8
+
+**glibc 2.35** adds a new C.UTF-8 locale, already provided by some downstream distributions and some operating systems. The locale supports full code-point sorting for all valid Unicode code points (including Unicode 14.0).
+
+## glibc HWCAPS loader support
+
+The library search path allows the loader to use a different library depending on how it is laid out on the filesystem (for instance to load an optimized version depending on the underlying hardware).
+
+Prior to the new scheme, each architecture defined its own way to handle this. x86\_64, for instance, defines a subset of its various hardware extensions and maps them to subfolders depending on the CPUID-supported flags. As an example, on glibc 2.31 running on a Haswell processor, multiple different paths are searched:
+
+![glibc running a haswell processor path 1](/linaro-website/images/blog/glibc-2.31-running-a-haswell-processor-path-1)
+
+It is similar on architectures where hardware support is mapped from the information provided by the kernel (AT\_HWCAP). For instance, on a POWER9 processor running glibc 2.17:
+
+![glibc running a haswell processor path 2](/linaro-website/images/blog/glibc-2.31-running-a-haswell-processor-path-2)
+
+The way the library search path is defined and combined generates a lot of permutations and leads to two different issues:
+
+1. More overhead in process initialization due to the multiple filesystem accesses. This is mitigated by using a library search cache ([ld.so.cache](https://man7.org/linux/man-pages/man8/ld.so.8.html)).
+2. It increases the complexity of providing libraries optimized for a specific ABI or processor, and of organizing and deploying the optimized builds.
+
+The solution in **glibc 2.33** is a simplified search path that does not create multiple subpaths based on hardware capabilities, but rather maps them to a specific ABI subfolder. Each architecture can then define a set of ABIs, as [x86\_64 has done](https://gcc.gnu.org/pipermail/gcc/2020-July/233088.html). The previous scheme had a bad side effect in that the possible combinations of hardware capabilities did not scale well (they increase exponentially with each new hardware capability). By defining specific sets, we can limit the possible permutations.
+
+For instance, on x86\_64 the loader now does:
+
+![Image of the loader with glibc 2.33 on x86\_64](/linaro-website/images/blog/image-of-the-loader-with-glibc-2.33-on-x86_64)
+
+The tls and previous haswell files are still present to keep compatibility, but both are set to be removed in future releases. Along with recent GCC support for selecting the new x86\_64 ISA levels via compiler flags, it is now simpler to build and deploy optimized versions of a library targeting a specific hardware extension.
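+
+As a brief aside before moving on, here is a minimal sketch of the 64-bit time opt-in described above, as it could look on a 32-bit Arm target with a glibc 2.34 (or newer) toolchain; the file name and build line are only an example:
+
+```c
+/* Example build on 32-bit Arm (glibc >= 2.34):
+ *   gcc -D_FILE_OFFSET_BITS=64 -D_TIME_BITS=64 time64.c -o time64
+ * With both macros defined, time_t becomes 64-bit even on 32-bit targets,
+ * so timestamps keep working past the year 2038.
+ */
+#include <stdio.h>
+#include <time.h>
+
+int main(void)
+{
+    printf("sizeof(time_t) = %zu bytes\n", sizeof(time_t));
+    printf("seconds since the Epoch: %lld\n", (long long)time(NULL));
+    return 0;
+}
+```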
+
+## Auditing API support for the dynamic linker
+
+**glibc 2.32** adds the missing DT\_AUDIT and DT\_DEPAUDIT support to the [rtld-audit interface](https://man7.org/linux/man-pages/man7/rtld-audit.7.html), where a program can specify a set of audit libraries the loader will load, similar to the ones specified by the LD\_AUDIT environment variable.
+
+**glibc 2.35** bumps the module API version (LAV\_CURRENT) and enables proper bind-now support (enabled either by the static linker or through the LD\_BIND\_NOW environment variable). The loader now advertises via the la\_symbind flags that PLT tracing is not possible.
+
+The audit interface on AArch64 is also extended to support both the indirect result location register (x8) and the NEON Q registers.
+
+## Huge Pages Support
+
+Linux keeps improving large page support on architectures that provide it (such as AArch64 and x86\_64), either by using large pages without requiring [user intervention](https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html) or by improving the userspace ABI.
+
+In **glibc 2.34** the \_\_morecore malloc hook was removed as a security improvement, which also removed the way the [libhugetlbfs project](https://github.com/libhugetlbfs/libhugetlbfs) used to enable large pages for dynamic memory allocation. To add back a way to use large pages in malloc, **glibc 2.35** adds a new tunable, glibc.malloc.hugetlb, that can be used to make malloc issue madvise with MADV\_HUGEPAGE on mmap and sbrk, or to use huge pages directly in mmap calls along with the MAP\_HUGETLB flag. The former improves Transparent Huge Pages usage by aligning the mmap calls and advising the kernel that the memory should be backed by large pages, while the latter uses the userspace API directly (thus improving performance by not relying on the kernel at runtime to handle large page migrations).
+
+## Unicode support
+
+Recent glibc versions keep in sync with the latest Unicode standard. Version **2.32** added support for [Unicode 13 along with new languages and glyphs](http://blog.unicode.org/2020/03/announcing-unicode-standard-version-130.html) and version **2.35** updated to the [newest Unicode 14](http://blog.unicode.org/2021/09/announcing-unicode-standard-version-140.html).
+
+## Support for newer security features
+
+The new security features help catch potential security issues during development when using glibc functions, leveraging extra compiler support in newer versions of Clang and GCC.
+
+**glibc 2.33** adds support for \_FORTIFY\_SOURCE=3 when used along with LLVM 9. Level 3 leverages new compiler support to deliver additional fortification, balanced against additional runtime cost (checking non-constant bounds).
+
+**glibc 2.32** also adds support for the GCC 'access' attribute, which improves compiler warnings for incorrect glibc interface usage. For instance, GCC can diagnose out-of-bounds access for functions like read if the input buffer size is known at the call site.
+
+**glibc 2.35** builds all programs as position independent executables (PIE) by default (if the toolchain supports it).
+
+**glibc 2.35** also supports \_FORTIFY\_SOURCE=3 when used with GCC 12.
+
+## Optimized math libraries
+
+**glibc 2.29** replaced its generic exp, exp2, exp10f, log, log2, pow, sinf, cosf, sincosf and tanf with optimized versions originally from [Arm Optimized Routines](https://github.com/ARM-software/optimized-routines). These new routines also follow the new C standard regarding error handling, which results in less overhead.
The performance improvements range from two to three times on both latency and throughput.
+
+**glibc 2.35** improves the performance of more math functions by removing the slow multiprecision paths. This affects asin, acos, tan, atan, and atan2.
+
+**glibc 2.35** also optimizes the hypot function, especially on architectures with Fused Multiply-Add instructions.
+
+## New APIs
+
+### Linux support
+
+**glibc 2.30** added the getdents64 function, used mainly by libraries that access the filesystem. It also added gettid, tgkill, and getcpu to keep in sync with Linux syscalls.
+
+**glibc 2.34** continues to improve support for Linux syscalls by adding both [execveat](https://man7.org/linux/man-pages/man2/execveat.2.html) and [close\_range](https://man7.org/linux/man-pages/man2/close_range.2.html).
+
+**glibc 2.35** added back [Restartable Sequences ABI support](https://www.efficios.com/blog/2019/02/08/linux-restartable-sequences/). The general idea is that the kernel handles any concurrent per-CPU memory operation interrupted by a context switch by running a registered fallback procedure. It can be used to accelerate user-space operations on per-CPU data, in functions like [sched\_getcpu](https://man7.org/linux/man-pages/man3/sched_getcpu.3.html) and possibly per-CPU memory caches (as some [memory allocators do](https://github.com/google/tcmalloc)).
+
+**glibc 2.35** also adds support for the [epoll\_pwait2](https://man7.org/linux/man-pages/man2/epoll_wait.2.html) syscall.
+
+### ISO C support
+
+**glibc 2.34** adds the C2X function timespec\_getres, which obtains the clock resolution, similar to the analogous POSIX function [clock\_getres](https://man7.org/linux/man-pages/man2/clock_gettime.2.html).
+
+**glibc 2.35** adds support for both the %b and %B formats in the printf family of functions to output integers in binary, as specified in draft ISO C2X.
+
+### POSIX support
+
+**glibc 2.34** adds \_Fork as an alternative to [fork](https://man7.org/linux/man-pages/man2/fork.2.html). It will be added in a future POSIX standard as an async-signal-safe fork replacement that does not run any atfork handlers nor reset any internal state or locks.
+
+### pthread\_attr\_setsigmask\_np, pthread\_attr\_getsigmask\_np
+
+This is a glibc extension that allows getting and setting the initial signal mask used when a new thread is created. It avoids some signal handler issues, since the user can mask all signals until the thread sets its desired mask.
+
+### \_\_libc\_single\_threaded
+
+This new symbol allows an application to detect when the process has become multithreaded, so it can optimize some routines that otherwise require synchronization to handle multiple threads. For instance, newer GCC C++ libraries (libstdc++) use the flag to optimize some C++ classes like std::once\_flag and std::call\_once.
+
+### sigabbrev\_np, sigdescr\_np, strerrorname\_np, and strerrordesc\_np
+
+These address a long-standing problem: how to map a supported errno value or signal number to its name and description in an asynchronous-signal-safe manner. The standard-defined ways (strerror and strsignal) are either not asynchronous-signal-safe (they trigger internal routines that try to translate the names to the session's configured language) or provide non-standard access to glibc-defined objects (which in turn has its own shortcomings, such as no out-of-bounds checks or the creation of copy relocations that increase memory usage).
+
+The newer APIs are fully asynchronous-signal-safe and align with a recent discussion [to add a similar API to the POSIX standard](https://www.austingroupbugs.net/view.php?id=1138).
+
+### pthread\_clockjoin\_np
+
+This is an extension to pthread\_join where the caller specifies not only a timeout, but also which clock to use.
+
+### pthread\_cond\_clockwait, pthread\_mutex\_clocklock, pthread\_rwlock\_clockrdlock, pthread\_rwlock\_clockwrlock and sem\_clockwait
+
+Similar to pthread\_clockjoin\_np, these are extensions to the pthread routines that allow using a clock other than CLOCK\_REALTIME with the specified timeout.
+
+### twalk\_r
+
+This is similar to the existing twalk function, but it passes an additional caller-supplied argument to the callback function.
+
+### posix\_spawn extensions
+
+Continuing the extension of posix\_spawn and posix\_spawnp, the posix\_spawn\_file\_actions\_addchdir\_np and posix\_spawn\_file\_actions\_addfchdir\_np extensions allow specifying the directory from which the new process will be spawned.
+
+**glibc 2.34** adds posix\_spawn\_file\_actions\_addclosefrom\_np, which allows closing an arbitrary file descriptor range before spawning the new process (it uses the Linux syscall close\_range if available).
+
+**glibc 2.35** adds posix\_spawn\_file\_actions\_addtcsetpgrp\_np, which allows setting the controlling terminal in the new process in a race-free manner. It might be used by terminal controllers (such as bash) to optimize subprocess creation (using posix\_spawn instead of fork plus execve).
+
+## Loader improvements
+
+A new DSO sorting algorithm has been added to the dynamic linker that uses topological sorting by depth-first search (DFS), solving performance issues of the existing sorting algorithm when encountering particular circular object dependency cases. This improves some pathological user cases, reducing startup time from 200 seconds to 15 seconds.
+
+## New standard support
+
+**glibc 2.31** supports the feature test macro \_ISOC2X\_SOURCE, which enables features from the draft [ISO C2X standard](https://en.wikipedia.org/wiki/C2x). The GCC 9 and Clang 9.0 compilers support the -std=c2x option for this standard. The libc part of the standard was already supported for some time by glibc (for instance the functions memccpy(), strdup(), strndup()).
+
+glibc also incorporates some clarifications for the new math functions introduced in [TS 18661-1:2014 and TS 18661-3:2015](http://www.open-std.org/JTC1/SC22/WG14/www/docs/n2095.pdf).
+
+## Future Plans
+
+The next **glibc 2.36** release, planned for August 2022, will have some important changes:
+
+1. Support for more Linux syscalls to work with pidfd and the new mount API.
+2. Support for the DT\_RELR relative relocation format, which optimizes the binary size of PIE executables.
+3. AVX512 support for the vector math library.
+4. More C2X support with the mbrtoc8 and c8rtomb functions.
+
+For more information on Linaro and the work we do, do not hesitate to [get in touch](https://www.linaro.org/contact/).
diff --git a/src/content/blogs/heterogeneous-multicore-systems-the-new-open-source-frontier.mdx b/src/content/blogs/heterogeneous-multicore-systems-the-new-open-source-frontier.mdx new file mode 100644 index 0000000..ea55275 --- /dev/null +++ b/src/content/blogs/heterogeneous-multicore-systems-the-new-open-source-frontier.mdx @@ -0,0 +1,38 @@ +--- +title: Heterogeneous Multicore Systems +description: Heterogeneous multicore computing is now all-pervasive with a + flexible co-processor architecture making it the new open source frontier. + Read more here. +date: 2020-02-13T04:23:39.000Z +image: linaro-website/images/blog/IoT +tags: + - iot-embedded +author: bill-fletcher +related: [] + +--- + +Heterogeneous multicore computing is now all-pervasive. Complex application processors in the mobile and other consumer segments have long featured many cores for various kinds of processing offload. This has typically included modem and wifi functions, DSP, real-time and power control. The presence of the many co-processor cores and engines gives a processing offload capability which frees up the main CPU (or CPU cluster) to run the host OS and application code. + +Configuring, building and maintaining multicore systems has historically been a hard problem. The good news is that there are an increasing number of multicore devices with a flexible co-processor architecture and good support for running non-proprietary software. This flexibility is helping to build a collaborative engineering effort around open source tools and software components to help develop and maintain products supporting a multicore system approach. + +The tasks that are being addressed by this collaboration include: + +A master configuration across multiple cores that are sharing a common set of memory and peripherals. A lifecycle management and communication framework to allow multiprocessing applications to leverage parallelism offered by the multicore configuration. +Standard interfaces for power, performance and system management. + +Linaro and its members are active in all of these areas and are developing a set of software components to help with the System approach to multiprocessing such as Device Tree, SCMI, RemoteProc, RPmsg. + +The Linaro Device Tree Evolution project \[1] is working towards a system view providing a unique system description that contains all peripherals and memories, with an associated view per processor which could be (real or virtualized) Cortex-A, Cortex-M or DSP cores. + +The OpenAMP Linaro Community Project \[2] and Linaro work on underlying RemoteProc, RPMsg provides Life Cycle Management, and Inter Processor Communication capabilities for management of remote compute resources and their associated software contexts. + +The System Control and Management Interface (SCMI) \[3] is extensible and provides standard interfaces to access functions which are often implemented in firmware in the System Control Processor (SCP). + +Linaro will be demonstrating these technologies at Embedded World - showing communication between Zephyr RTOS \[4] and Linux on an Avenger96. Avenger96 is a community development board based on the STM32MP1 \[5]. You can meet us at the Zephyr booth (4-170). 
+ +![96Boards Avenger Board](/linaro-website/images/blog/96BoardsAvenger) + +References + +\[1] Device Tree Evolution Project - [https://www.linaro.org/assets/pdf/Linaro-White-Paper--Device-Tree-Evolution.pdf](https://www.linaro.org/assets/pdf/Linaro-White-Paper--Device-Tree-Evolution.pdf) \[2] OpenAMP - [https://www.openampproject.org/ ](https://www.openampproject.org/)\[3] System Control and Management Interface [https://developer.arm.com/architectures/system-architectures/software-standards/scmi](https://developer.arm.com/architectures/system-architectures/software-standards/scmi) \[4] Zephyr RTOS [https://www.zephyrproject.org/](https://www.zephyrproject.org/) \[5] STM32MP1 [https://www.st.com/en/microcontrollers-microprocessors/stm32mp1-series.html](https://www.st.com/en/microcontrollers-microprocessors/stm32mp1-series.html) diff --git a/src/content/blogs/high-performance-computing-hpc-reflection-and-forward-looking.mdx b/src/content/blogs/high-performance-computing-hpc-reflection-and-forward-looking.mdx new file mode 100644 index 0000000..7aca2c1 --- /dev/null +++ b/src/content/blogs/high-performance-computing-hpc-reflection-and-forward-looking.mdx @@ -0,0 +1,105 @@ +--- +title: High Performance Computing | Forward-looking +description: > + High Performance Computing forms a key part of our Linaro Connect activities + and our virtual conferences. Here we reflect and look at what's next for HPC. +date: 2020-06-26T04:22:14.000Z +image: linaro-website/images/blog/hpc-bg +tags: + - hpc + - open-source + - arm +related_projects: + - HPCAI +author: paul-isaacs +related: [] + +--- + +We have previously reflected on the first step for ARM into HPC. Since then, Linaro has been working to increase awareness of the successes within the [High Performance Computing ARM ecosystem](https://static.linaro.org/assets/HighPerformanceComputingARMecosystem-small.pdf). High Performance Computing now forms a key part of our Linaro Connect activities ([Connect Resources](/cloud-computing-and-servers/)) and more recently our virtual conferences, due to Covid-19, ([LTD20-106 State of ARM-based HPC](https://resources.linaro.org/en/resource/Qte2Z3ajBHienZ3ZbmoWjy)) for 2020. + +At Supercomputing 2019 we saw the public viewing of Fujitsu’s A64FX ([SC’19](https://www.fujitsu.com/global/solutions/business-technology/tc/events/sc19/)) and some of the first sales outside of Japan, in 2020, including another one of Linaro’s members, Sandia National Labs ([FX700](https://share-ng.sandia.gov/news/resources/news_releases/green_processor/)). Sandia, is the notable home of [Astra, the first Arm supercomputer](https://en.wikichip.org/wiki/supercomputers/astra) to join the Top500 list in 2018 and exceed 1 Petaflops (2.332 Petaflops using over 5000 [Marvell ThunderX2](https://www.marvell.com/products/server-processors/thunderx2-arm-processors.html) processors). Marvell continues to be another highly valued member of Linaro. + +The A64FX is the first processor to support a 512-bit hardware implementation of Arm’s Scalable Vector Extension ([SVE](https://developer.arm.com/docs/100891/latest/sve-overview/introducing-sve)). Fujitsu gave a talk in early 2019 to introduce the components that would make up their next Supercomputer (*[A64FX](https://static.linaro.org/assets/A64FXTheFirstSVE.pdf)*). 
That next Supercomputer is now here: as of today, June 22nd 2020, RIKEN’s Fugaku is the No.1 Supercomputer in the world according to the [Top500 bi-annual review](https://www.top500.org/news/japan-captures-top500-crown-arm-powered-supercomputer/). In just two years since Arm-based systems entered the Top500, Fugaku can compute 415.53 Petaflops. This is a 200x increase in processing for only a 20x increase in power consumption compared to Astra.
+
+A key component of SVE for the software developer is to code once and be bit-length agnostic from 128 bits up to 2048 bits in 128-bit increments for vector processing. A detailed look was provided by Fujitsu at Linaro’s March [Tech Days](https://www.youtube.com/watch?v=OL_ZiXuZXyk). A developer’s application can auto-configure for the respective hardware implementation. This has enabled developers to code in [256-bit software emulated environments](https://hub.docker.com/r/linaro/gem5-riken-open) and have the code automatically run optimised on and for the 512-bit hardware unchanged.
+
+**Overview of Single-Instruction-Multiple-Data (SIMD)**
+
+Scalable Vector Extension ([SVE](https://static.linaro.org/assets/SVE-a-sneak-peek.pdf)) is an implementation of single-instruction-multiple-data ([SIMD](https://developer.arm.com/architectures/instruction-sets/simd-isas)). A simple explanation would be to apply the same instruction across data loaded into multiple registers. For example, values could be increased by a fixed amount, or a bitwise operation performed in parallel.
+
+The single-instruction-multiple-data process benefits large volumes of similar data such as that found in graphics calculations and machine learning networks.
+
+For example:
+
+[Vector addition](https://en.wikipedia.org/wiki/Euclidean_vector#Addition_and_subtraction): a = b + c;
+
+[Fused multiply and accumulate (saxpy)](https://developer.arm.com/docs/ddi0596/e/simd-and-floating-point-instructions-alphabetic-order/fmlal-fmlal2-vector-floating-point-fused-multiply-add-long-to-accumulator-vector): y = a\*x + y;
+
+Dot products: a\_{ij} = x\_{ik} y\_{kj} (in tensor notation), as required in [gemm (matrix-matrix) operations](https://developer.arm.com/architectures/instruction-sets/simd-isas/neon/neon-programmers-guide-for-armv8-a/optimizing-c-code-with-neon-intrinsics/optimizing-matrix-multiplication).
+
+*“Typically, whole SIMD operations form the inner-most of loops and the registers are assigned to light weight threads, say Open MP, on the next outer layer. A further coarse grained parallelism is then supplied by the outer ‘administrative’ loop layers such as Open MPI which typically allocate heavier blocks, e.g. domain or logical decomposition of work to packages.” (quote: Roger Philp, Linaro HPC Senior Engineer)*
+
+In the hierarchy of packages, cores, threads, vectors:
+
+![hierarchy of packages, cores, threads, vector](/linaro-website/images/blog/hpc-hierarchy)
+
+A typical and recurrent problem encountered with the vector models, however, is the inability to lock data in registers, without automatic flushing, until that data can be completely retired. For example:
+
+```
+Minimize main memory and cache access
+Maximise reuse of array content in say block matrix multiply for instance or convolutions
+a[0] = a0
+a[1] = a1
+...
+a[n] = an +load a[] -> R <--- c/c++ user control command + <--- loads a[] into cpu register R +lock R <--- c/c++ user control command + <--- R is now read only and not flushable +do lots of other stuff, but register R remains static +unlock R <--- c/c++ user control command + <--- Register R is now rewritable and flushable +``` + +Aarch64 may have a workaround to [enable/disable flushing](https://developer.arm.com/documentation/ddi0595/2021-12/). However, we welcome your thoughts on this topic. Email: hpc-sig@linaro.org + +Linaro’s HPC-SIG are working towards [profiling HPC](https://resources.linaro.org/en/resource/Ld2UGAdVvcTZRs89kSJsbr) vector dependent applications for code hotspots, bottlenecks and cache misses in the Neoverse platform. + +**Background on Scalable Vector Extension (SVE)** + +Prior to SVE, single-instruction-multiple-data was implemented using Arm’s Neon technology. Neon has been used predominantly in accelerating audio and video encoding/decoding as well as 2D/3D graphics. The[ Neon intrinsics](https://developer.arm.com/architectures/instruction-sets/simd-isas/neon/intrinsics?page=1) are function calls that the compiler replaces with Neon instruction(s). [SVE intrinsics](https://developer.arm.com/documentation/100891/0612/coding-considerations/using-sve-intrinsics-directly-in-your-c-code) are a different set of function calls to make use of scalable vector extension hardware. This was the first pass at agnostic vector lengths. A further development for SVE2 (to be utilised by silicon vendors in the future) will enable a common set of intrinsics which can make use of both SVE and NEON hardware optimisations, as described in the Arm presentation at Linaro’s Connect conference ([SAN19](https://resources.linaro.org/en/resource/dURtYSHjBaT7kcCpWXcXcG)). + +**Related applications - [SVE in QEMU's linux-user mode](/blog/sve-in-qemu-linux-user/)** + +Having a standards-based server booting a Linux kernel is the infrastructure. What makes the infrastructure relevant is the application that utilises it. Within Linaro’s HPC-SIG we understand that the infrastructure is becoming more heterogeneous. There are a variety of Instruction Set Architectures, SoCs, ASICs, FPGAs that sit within the modern data center. Applications are being tweaked to make use of new server hardware features. However, the more the application is tuned to given hardware the more complex the maintenance becomes across the hundreds of applications in the ecosystem. + +In the scalable vector extension and NEON examples, compiler intrinsics are included in the code as functions to be converted to optimised hardware instructions. A further development could provide instead for the same intrinsics to be hidden in a backend solution whilst the frontend application is converted to cloud-native calls in an abstraction that is hardware agnostic. Linaro's HPC-SIG is collaboratively exploring how Cloud-Native computing could benefit the high performance computing community and provide public-facing easy-to-use solutions. + +The balance to be struck of differing workloads is to identify how many everyday applications vs mathematically intensive applications might benefit from SIMD. 
For example, how many 10,000 core rack of systems like the low-power microservices aimed B1000N, based on Linaro member [NXP’s 16-core LX2160A](https://www.nxp.com/products/processors-and-microcontrollers/arm-processors/layerscape-multicore-processor-/layerscape-lx2160a-multicore-communications-processor:LX2160A), versus a high-power system kitted with Marvell’s upcoming 96-core ThunderX3? The question is not either/or, but how many of both. + +**Continuous integration** + +Linaro is well known for its [CI infrastructure](https://ci.linaro.org/). The HPC-SIG will be extending its internal CI testing to have the results publicly available. Linaro already hosts on ThunderX2s the global CI testing for the [Arm variant of OpenHPC](https://developer.arm.com/solutions/hpc/hpc-software/openhpc). Additions to CI will include ML Frameworks such as Arm server builds of [TensorFlow](https://github.com/tensorflow/tensorflow) and [PyTorch](https://github.com/pytorch/pytorch) that benefit from the resources high performance computing can provide. These complement the work carried out by our Edge/Mobile computing groups at https://www.mlplatform.org/ + +**CPU v GPU v SVE** + +Off loading with a CPU+GPU model can be expensive. Small matrices (< 1024x1024) have a high transmission overhead compared to the actual compute time within a GPU, such that a conventional non-vector CPU can complete the process surrounding the whole computation in comparable time. Later this year we could hope to see benchmarks verify the optimism that vector-enabled CPUs could match or even exceed GPU-based performance for significantly larger matrices used in the likes of RESNET-50 and others with 100s of millions of parameters. + +**Infrastructure** + +The method of interconnection used to link compute nodes can have a significant impact on the performance of the system as a whole. What will be interesting to see is how different the Fugaku Supercomputer styled on Fujitsu’s FX1000 with its TOFU interconnect [compares](https://www.fujitsu.com/global/products/computing/servers/supercomputer/specifications/) to the PCIe/InfiniBand enabled Fujitsu FX700 for real workloads. Keep watching and Linaro’s HPC-SIG could create future blogs on performance tuning vector-enabled CPUs. + +High performance computing demands significant investment in skills and budget. If you’re [budget constrained](https://store.avantek.co.uk/arm-servers.html) then it’s still possible to scale up node by node and perhaps forego [commercial licenses](https://developer.arm.com/solutions/hpc/hpc-software) for comparable opensource [toolchains](/core-technologies/toolchain/) and toolkits such as [OpenHPC](https://openhpc.community/). + +However your system is built, you can be sure ARM-based solutions can be your equal partner in HPCand Linaro is here to complement the open source ecosystem. Congratulations once again **Fugaku simultaneously #1 in the Graph500, HPCG, and HPL-AI lists.** + +**Background on Sandia:** Sandia National Laboratories is operated and managed by National Technology and Engineering Solutions of Sandia, LLC., a wholly owned subsidiary of Honeywell International, Inc. National Technology and Engineering Solutions of Sandia operates Sandia National Laboratories as a contractor for the U.S. Department of Energy’s National Nuclear Security Administration (NNSA) and supports numerous federal, state, and local government agencies, companies, and organizations. 
+ +A strong science, technology, and engineering foundation enables Sandia's mission through capable research staff working at the forefront of innovation, collaborative research with universities and companies, and discretionary research projects with significant potential impact. Sandia works with other government agencies, industry and academic institutions to accomplish their missions in the strategic areas of nuclear weapons, national security programs, energy and global security. + +**Background on Fujitsu:** Fujitsu has operations across Europe, the Middle East, India and Africa with more than 25,000 employees across the region. The Fujitsu Group has established a global service structure with operations in more than 180 countries around the world. The largest business area is technology solutions. Fujitsu provides corporate customers around the globe with IT-driven business solutions based on cutting-edge digital transformation technologies, services and high-quality digital business platforms. Fujitsu’s portfolio of solutions are focused on addressing specific business and IT challenges within industry; adapted to retailers, financial services, automotive or manufacturing organizations as well as central and local governmental departments. + +As well as the provision of IT services and solutions, Fujitsu also develop and manufacture a range of electronic products and devices for use across a wide range of applications such as imaging, wireless communications and security. + +**Background on Linaro:** Linaro was founded in 2010 and since its inception Linaro has driven open source software development on Arm. Linaro provides the tools, Linux kernel quality and security needed for a solid foundation to innovate on. The company is made up of engineers across a breadth of specialisms and collaboration is at the heart of the company. Linaro is member driven, with the member engineers working with Linaro engineers to solve shared ecosystem software problems. The membership base at Linaro is diverse and includes some of the most prominent names within the industry. Through this process, Linaro’s principles of eliminating duplication of effort, reducing fragmentation and aiding speed to market are just some of the benefits that Linaro provides to its members and the open source community. diff --git a/src/content/blogs/highlights-from-hpc-asia-2019-and-linaros-arm-hpc-workshop.mdx b/src/content/blogs/highlights-from-hpc-asia-2019-and-linaros-arm-hpc-workshop.mdx new file mode 100644 index 0000000..3d0e3c8 --- /dev/null +++ b/src/content/blogs/highlights-from-hpc-asia-2019-and-linaros-arm-hpc-workshop.mdx @@ -0,0 +1,66 @@ +--- +title: "On the Spot: Highlights from HPC Asia 2019 & Linaro’s Workshop - Open + Source HPC collaboration on Arm Architecture" +author: brian-pang +date: 2019-01-21T09:00:00.000Z +description: On Jan 14th, 2019, not yet half a month into the new year,  HPC + Asia held its annual conference in Guangzhou, China. Led by Linaro, with Arm + ecosystem partners, the Linaro workshop“Open Source HPC on Arm + Architecture”was held on the first day of HPC Asia 2019. It is the first time + in the events 20 year history that Arm ecosystem partners have joined forces + under the leadership of Linaro. +tags: + - arm + - hpc +image: linaro-website/images/blog/hpc-workshop-banner-image +related: [] + +--- + +On Jan 14th, 2019, not yet half a month into the new year,  HPC Asia held its annual conference in Guangzhou, China. 
Led by Linaro, with Arm ecosystem partners, the Linaro workshop “Open Source HPC on Arm Architecture” was held on the first day of HPC Asia 2019. It was the first time in the event’s 20-year history that Arm ecosystem partners had joined forces under the leadership of Linaro. + +![HPC Asia 2019 Group photo](/linaro-website/images/blog/hpc-workshop-banner-image) + +HPC Asia is an authoritative international conference on HPC and related research that has been held in the Asia Pacific region for decades, promoting the rapid development of HPC in Asia and deeper collaboration around the globe. After 10 years, while countries around the world are racing towards Extreme Computing, HPC Asia came back to China in 2019 after its 2018 annual conference in Japan. While global collaboration faces challenges today, Linaro leads win-win collaboration in HPC around the world through open source. + +![HPC Asia 2019 Marketing Graphic](/linaro-website/images/blog/hpc-asia-graphic-blog) + +This Linaro workshop “Open Source HPC on Arm Architecture” included presentations, a Q\&A and a roundtable panel session. [You can view all presentations and slides here.](/events/arm-hpc-asia-2019/) + +The participants and guest speakers included world leading experts from RIKEN (Japan), HTFC Hartree center (UK), and the HPC center of Shanghai Jiaotong University (China). Industry leaders from Arm, Linaro, Huawei, Fujitsu, Phytium, Gigabyte, HPC Systems Inc., Skymizer, and Quantum Cloud covered everything from chip design to software architecture, system integration, AI and cloud applications. Linaro experts flew from Europe and the US to Guangzhou and shared updates on progress in the Arm HPC ecosystem collaboration, and put forward their research and insights in AI on HPC. + +Topics at the workshop covered all aspects of the Arm server ecosystem, from chip design, hardware, software architecture and standardization to performance tuning, and applications in biology, medicine, meteorology, astronomy, geography etc. It is exciting to see that Arm servers are being used in so many areas, contributing significantly to the global economy. + +As the Arm HPC world leader, experts from [RIKEN](https://www.youtube.com/watch?v=xhzlV91l-zU) and [Fujitsu](https://www.youtube.com/watch?v=mukz1rkFETk) shared details of the Post-K design, revealing the keys to achieving high performance and high memory bandwidth. They also shared their AI deep learning research on Post-K and pointed out a clear path for the next steps of HPC development. As a partner in HPC applications, [Quantum Cloud](https://www.youtube.com/watch?v=x56ALWd7OnE) put forward a fabric-based Arm SoC solution, Socionext SynQuacer via PCIe fabric for networking, a very practical and novel method for implementing HPC with Arm SoC chips. Huawei, [Phytium](https://www.youtube.com/watch?v=QviQQYoTr9Y) and Fujitsu introduced their roadmaps, strategies, newest products and applications of HPC. [HPC Systems Inc.](https://www.youtube.com/watch?v=Virzxq4ui_U) demonstrated their application and cloud services based on Arm HPC on-site. [Gigabyte](https://www.youtube.com/watch?v=ErbkhqNgCJk), a system integration company, showed several commercial Arm server solutions, which are helping Arm HPC servers to materialize. Many experts showed the progress of Arm HPC through all sorts of benchmark test results. Linaro provided the "Open Innovation" stage for all partners to show their solutions, and let their diversity shine.
+ +Several experts emphasized the role of AI acceleration based on Arm SVE, and Arm server standardization. They also acknowledged Linaro's efforts in reducing fragmentation through open source collaboration and the Arm ServerReady program. Arm pointed out that, as an example, Astra, the first Arm-based cluster, built at Sandia National Laboratories in the US ([Sandia National Laboratories joined Linaro's HPC group in August 2018](/news/sandia-national-laboratories-joins-the-linaro-hpc-special-interest-group/)), illustrates that the time for supercomputers based on Arm technology has come. Arm would like to see all partners continue to make progress by collaborating in the open source community. It was inspiring to see the expert disclosures at this workshop, and the Q\&A sessions were very engaging. + +![HPC Asia 2019 - Dr. Mohamed Wahib at Linaro workshop](/linaro-website/images/blog/hpc-asia-speaker) + +*Dr. Mohamed Wahib at Linaro workshop* + +![HPC Asia 2019 - Arm & Linaro Ecosystem Partner talking at Linaro Workshop](/linaro-website/images/blog/hpc-asia-partners) + +*Arm & Linaro ecosystem partners at Linaro workshop* + +Linaro also gave a session at a separate workshop, “Vendor Vision”, during the HPC Asia 2019 annual conference. It was presented by Elsie Wahlig, Sr. Director of Linaro Data Centre & Cloud Group (LDCG) & HPC SIG, and Renato Golin, Tech Lead of HPC SIG, Linaro. + +![Roundtable Panel hosted by Elsie Wahlig, Sr. Director of LDCG & HPC SIG. The topic was “Frontiers of AI deployments in HPC on Arm.”](/linaro-website/images/blog/round-table-hpc-asia)
*Roundtable Panel hosted by Elsie Wahlig, Sr. Director of LDCG & HPC SIG. The topic was “Frontiers of AI deployments in HPC on Arm.”* + +*Guests: Dr. Satoshi Matsuoka, Riken / Luba, CEO Skymizer / Pak Lui, Principal Architect, Huawei* + +![Demo booths at HPC Asia 2019](/linaro-website/images/blog/demo-booths-at-hpc-asia-2019) + +*Demo booths* + +*Huawei, Gigabyte and Skymizer showing their Arm servers and AI applications* + +![Q\&A: Engaging discussion among scientists from various national HPC labs](/linaro-website/images/blog/hpc-workshop-2019-q-and-a) + +Q\&A: Engaging discussion among scientists from various national HPC labs + +Just after the Linaro workshop, HPC Asia core founders and steering committee members congratulated Linaro, saying they thought this workshop was “terrific!” + +![Jill Guo, EVP Linaro, head of Linaro Greater China, made the welcome speech and announced the upcoming Linaro BKK19 Connect in Bangkok, Thailand.](/linaro-website/images/blog/JillGuo-LinaroheadofLinaroGreaterChina) + +Jill Guo, EVP Linaro, head of Linaro Greater China, made the welcome speech and announced the upcoming Linaro BKK19 Connect in Bangkok, Thailand. diff --git a/src/content/blogs/highlights-from-linaro-connect-on-qualcomm-technologies-and-open-source-software.mdx b/src/content/blogs/highlights-from-linaro-connect-on-qualcomm-technologies-and-open-source-software.mdx new file mode 100644 index 0000000..0394597 --- /dev/null +++ b/src/content/blogs/highlights-from-linaro-connect-on-qualcomm-technologies-and-open-source-software.mdx @@ -0,0 +1,48 @@ +--- +title: Qualcomm Technologies & Open Source Software at Linaro Connect +description: In this blog we list the sessions presented at Linaro Virtual + Connect to highlight achievements related to Qualcomm technologies and open + source software. +date: 2021-05-19T03:20:25.000Z +image: linaro-website/images/blog/tech_background +tags: + - open-source + - linux-kernel +author: nicolas-dechesne +related: [] + +--- + +Linaro plays a key role in the Qualcomm ecosystem, [employing several developers and maintainers of key Qualcomm subsystems and drivers](https://www.linaro.org/services/qualcomm-platforms-services/). With more than 2500 Qualcomm-related contributions in the upstream Linux kernel, Linaro is continuously improving the support for Qualcomm Snapdragon processors. In addition, we deliver and maintain Linux and Android reference BSPs for the DragonBoard 410c, the DragonBoard 820c, the Qualcomm® Robotics RB3 and Qualcomm Robotics RB5 platforms. + +In this blog we list the sessions presented at the most recent Linaro Virtual Connect by Linaro engineers to highlight achievements related to Qualcomm technologies and open source software. To view a session video or download the presentation, click on the relevant session heading below. + +# [Qualcomm Upstream Update](https://resources.linaro.org/en/resource/fByWApNzZYHAAsdR2mSXZi) + +In this session, Bjorn Andersson (Principal Tech Lead, Linaro) provided a general status update on the upstream support currently happening for a growing number of Qualcomm platforms. + +# [The Qualcomm IPA Driver](https://resources.linaro.org/en/resource/P9mzGkAzt5cJZHe2zAGtUp) + +In this talk, Alex Elder (Senior Engineer, Linaro) presented "the story" of the IPA upstream driver, an overall status update and roadmap. The Qualcomm IPA (IP Accelerator) is a component in Qualcomm Snapdragon processors that provides wireless internet access to an application processor using a modem.
Qualcomm has a "downstream" IPA driver for Linux, but for certain upstream-based environments, downstream code is not acceptable. There is now a driver for the IPA in the upstream Linux kernel, derived from, but now very different from, the downstream code. What started as about 45,000 lines of code was simplified, cleaned, refactored, and evolved into the 14,000-line driver that was eventually accepted upstream. The driver continues to undergo additional development, now supporting multiple generations of IPA hardware on three distinct SoCs. This session provided an overview of the role IPA plays in this system, followed by some discussion of the evolution of the code from its "simplified" starting point to its upstream implementation. + +# [Modern Modem Support in Linux](https://resources.linaro.org/en/resource/WGZmwEwLFyYm2Yo2SNjR6k) + +The arrival of the fifth-generation mobile network, known as 5G, promises an even more connected world, featuring billions of devices, from smartphones to connected vehicles, including network gateways, always-connected laptops, telemedicine machines, IoT gadgets and more. With Linux being the major OS in the embedded world, Linux support for WWAN modems is crucial and will certainly impact industries over this decade. During this presentation, Loic Poulain (Senior Engineer, Linaro) gave a brief update on cellular modem support in Linux, the software stack and its components, and how Linaro, with its partners, contributes to cutting-edge upstream modem support in Linux. + +# [Arm Laptops](https://resources.linaro.org/en/resource/VBsmUgK9iExiqSt5hf7jgw) + +Only recently have AArch64 laptops arrived on the market that are suitable for Linux developers. In this session, Richard Henwood (Server Software Ecosystem Manager, Arm), Bjorn Andersson (Principal Tech Lead, Linaro) and Shawn Guo (Tech Lead, Linaro) reviewed the options and support for upstream Linux kernels, GNU user space and associated tooling that make up a modern GNU/Linux distribution. In particular they focused on laptops that provide a UEFI boot process and reviewed specific platforms for their current status. + +# [Supporting Qualcomm wcn3680 WiFi on Android and upstream](https://resources.linaro.org/en/resource/ndC3Y3r5WfpozRyzsCnRzN) + +In this session, Bryan O'Donoghue (Senior Engineer, Linaro) talked about adding support for the wcn3680 WiFi chipset to upstream and Android kernels. This session included a brief overview of wcn3620, wcn3660 and wcn3680, the initial state of hardware support upstream, lessons learned and more. + +In addition to the sessions presented by Linaro engineers, we were pleased to invite Dev Singh (Senior Director, Business Development and GM of autonomous robotics, drones and intelligent machines, Qualcomm Technologies, Inc.) to present a keynote: + +# [Qualcomm keynote on AI & 5G Enabling the Next Generation of Robotics](https://resources.linaro.org/en/resource/cjBGD2tBQ2Mykhn5WwZHru) + +In his keynote, Dev Singh talked about the transformative power of 5G and AI technologies in creating the next generation of high-compute, low-power robots and drones for the consumer, enterprise, defense, industrial, and professional service sectors. He also spoke about scaling 5G and AI and how doing this will help solve a wide range of robotics challenges - from enabling enhanced security and connectivity to high-accuracy AI inferencing and superior power-efficiency. + +# What is to come?
+ +We will shortly be announcing the dates for the upcoming Linaro Virtual Connect Fall 2021 where we expect to have plenty more sessions related to Qualcomm technologies. Make sure to follow Linaro’s social media channels for updates and for more information on the work Linaro does on Qualcomm platforms and how we can help, [click here](https://www.linaro.org/services/qualcomm-platforms-services/). diff --git a/src/content/blogs/highlights-from-lund-linux-con-2019.mdx b/src/content/blogs/highlights-from-lund-linux-con-2019.mdx new file mode 100644 index 0000000..e5e6e6e --- /dev/null +++ b/src/content/blogs/highlights-from-lund-linux-con-2019.mdx @@ -0,0 +1,33 @@ +--- +title: Highlights from Lund Linux Con 2019 +author: niklas-cassel +date: 2019-06-12T23:00:00.000Z +description: A few weeks ago, Linaro attended Lund Linux Con 2019, the largest + conference in Scandinavia focused on the Linux Kernel. Here are some of the + highlights. +tags: + - open-source + - arm + - linux-kernel +image: linaro-website/images/blog/uncompressed_crowd4 +related: [] + +--- + +A couple of weeks ago, the annual Lund Linux Con (LLC) [https://lundlc.org](https://lundlc.org) was held in Lund, Sweden. 
  
This was the sixth iteration of the Linux conference and this time Linaro was one of the main sponsors of the event. See the picture below of the t-shirt Linaro sponsored. + +![Linaro t-shirt](/linaro-website/images/blog/linaro-tshirt) + +According to the organizers, it is the largest conference, focused specifically on the Linux kernel, in all of Scandinavia.
  
LLC is a "half open" conference, which means that it is formally by invitation only, but as long as you have any relationship with the Linux kernel, you simply have to ask for an invite.  
  
The conference tries to bring people together from the “greater Lund area”, which is a comical definition, since a lot of the attendees come from Copenhagen, the capital of Denmark, which is only 40 minutes away by train.
  
Even though the conference claims to focus on the local region, it has previously hosted well known kernel developers such as Thomas Gleixner, Julia Lawall, and Christoph Hellwig, from outside the local region.  
  
However, local kernel developers such as Linus Walleij, Ulf Hansson, Matias Bjørling and Jesper Dangaard Brouer should be familiar to those who follow the Linux kernel mailing list.
  
The attendees come from a number of different companies, including Linaro, Red Hat, Axis, Western Digital, Samsung, Sony, Volvo, Bosch, Ericsson, etc.  
It's quite rare to see so many local companies, from widely different areas, come together at a single event. + +![Companies](/linaro-website/images/blog/uncompressed-companies) + +
The discussions this year were focused on performance, RISC-V, and also included a follow up discussion on Linux in cars from previous years.  
  
Presentations this year included, but are not limited to:   + +"Reworking of KVA allocator in Linux kernel" by Uladzislau Rezki (Sony), a reimplementation that improves the speed of the vmalloc allocator by more than 50%. This major rework has since been merged into the v5.2-rc1 kernel.  
  
"It's all speculative" by former OpenSSL core maintainer Andy Polyakov (Chalmers University of Technology), who gave an interactive presentation explaining how issues such as Spectre and Meltdown are possible. People from the audience were brought up on stage to represent different instructions in a CPU instruction  
pipeline, in order to better visualize how speculative execution works.  
  
"Mopping up kernel messes one at a time, this time: licensing" by Thomas Glexiner (Linutronix), who explained the issues with unclear licenses, especially for small and medium size businesses. Turns out that there have been more than one hundred different variations of the GPLv2 text alone. They have even found and fixed GPL-incompatible source files.  
  
"RISC-V and Linux State of the HART" by Damien Le Moal (Western Digital), who explained how far the RISC-V port of Linux has come, and the current  
state of the ISA base and its extensions. He also gave us a live demo of a single board computer that lacked an MMU, and which only had 8 MB of on-chip SRAM, running Linux with a busybox user space.  
  
"Bufferbloat mitigation in the Linux WiFi stack" by Toke Høiland-Jørgensen (Red Hat), who explained what bufferbloat is, and how to mitigate it. Bufferbloat has basically been fixed for Ethernet, but for WiFi, there are still some things left to  
be done. The problem is a bit different for WiFi, since there is a lot of overhead for each frame transmitted, so frames are usually aggregated. Another problem is that there is usually an internal queue in the WiFi firmware, which adds additional latency/buffering.  
 
Linaro has a small office in Lund, where you can find the following developers:  + +* Joakim Bech - Security Working Group   +* Jens Wiklander - Security Working Group   +* Niklas Cassel - Landing Team - Qualcomm   +* Ulf Hansson - Power Management Working Group   +* Linus Walleij (Assignee - Arm) - Kernel Working Group diff --git a/src/content/blogs/history-of-the-interconnect-framework.mdx b/src/content/blogs/history-of-the-interconnect-framework.mdx new file mode 100644 index 0000000..92e96f5 --- /dev/null +++ b/src/content/blogs/history-of-the-interconnect-framework.mdx @@ -0,0 +1,58 @@ +--- +title: History Of The Interconnect Framework +description: In this article, Georgi Djakov takes a detailed look at the history + behind the Interconnect Framework. Read more on his findings here! +date: 2020-12-22T04:28:00.000Z +image: linaro-website/images/blog/city +tags: [] +related_projects: + - PERF +author: georgi-djakov +related: [] + +--- + +# Saving Power & Improving Performance With Dynamic Interconnect Scaling + +![Interconnect Framework Timeline](/linaro-website/images/blog/interconnect-framework-timeline) + +The Interconnect API is a framework for configuring the on-chip interconnects in the system. It provides an API for drivers to express their bandwidth needs when transferring data and interacting with the different hardware blocks in the system. The framework tunes the system for the best power and performance while taking into account the aggregated traffic between the different endpoints. It was merged in January 2019 and is available in the Linux kernel since v5.1. In this article I’ll share more about the history behind it. + +In March 2016, at the Linaro Connect in Bangkok, Stephen Boyd did a presentation titled “[Dissecting the 2M LoC QC fork](https://www.slideshare.net/linaroorg/bkk16500-dissecting-the-2m-loc-qc-fork)”. He gave an overview of how different a SoC vendor kernel is from the mainline Linux. The SoC vendors usually fork the mainline kernel and add support for their hardware. The changes added on top are generally not just drivers, but also significant changes all over the place, including changes into core frameworks and tons of code to support new features and standards, various optimizations and tuning for specific use-cases. + +At that time I also remember seeing this picture, which was later used in the “[The Upstream Bubble](https://resources.linaro.org/en/resource/WH2rT7Uo2dtkbfEdeaeqdd)”. It’s a functional dependency graph showing what kind of dependencies each individual driver has on one of the Qualcomm SoCs at that time. The Bus Scaling node, in the center of the graph is colored in dark blue, which means that many drivers are not able to function without it. It was also circled in red, to denote that this particular feature is not supported upstream. + +![Functional Dependency Graph](/linaro-website/images/blog/upstream-bubble) + +## Bus Scaling + +The Bus Scaling node represents a piece of code known in the downstream Qualcomm kernels as msm-bus. The task of this driver was to configure the bus performance across the entire SoC. This happens based on the requests from clients and involves configuring clock frequency, latency and QoS parameters for each bus. This driver also takes care of enabling access to resources on the bus (which very likely could be disabled by default in order to save power). So without it, many other drivers would not be able to function. 
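+
+Before diving into how the framework came about, here is a minimal, hypothetical sketch (not taken from any real driver - the path name and bandwidth numbers are made up for illustration) of what the end result looks like from a consumer driver's point of view, using the upstream Interconnect API from `<linux/interconnect.h>`:
+
+```
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interconnect.h>
+
+/* Hypothetical consumer: request bandwidth on a path described in the DT. */
+static struct icc_path *path;
+
+static int example_start_streaming(struct device *dev)
+{
+	/* "cpu-mem" would be an interconnect path named in the device tree. */
+	path = of_icc_get(dev, "cpu-mem");
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+
+	/* Express the average and peak bandwidth (in kBps) this use-case needs. */
+	return icc_set_bw(path, 100000, 200000);
+}
+
+static void example_stop_streaming(void)
+{
+	icc_set_bw(path, 0, 0);	/* drop the request when idle */
+	icc_put(path);
+}
+```
+
+The framework aggregates requests like these from all consumers and configures the interconnect providers accordingly. The rest of this article looks at how it got there.
+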
+ +Linaro’s spirit is all about open-source collaboration, so we started brainstorming with engineers from Linaro, Qualcomm and the Linux community, wondering how such a feature could be supported in mainline Linux and whether it could be useful for others too. The existing downstream code was vendor-specific and written with different requirements in mind, which kept it from meeting upstream standards. This was expected, as the SoC vendors are mainly focused on their products, and upstreaming drivers takes more time and discussion. For a completely new framework, it can take much more. The code had to be completely rewritten, and I decided to get familiar with the theory and start from scratch. + +At Linaro Connect September 2016 in Las Vegas, I made a presentation about the evolution of SoCs and the Network-On-Chip concept, explaining the problem that we wanted to solve. There were many ideas about expanding current frameworks like PM QoS and the Generic PM domains. Many discussions followed - about topologies, governors, links between devices, device-tree bindings etc. + +## Power Consumption + +In 2017, I started posting RFC patches on the mailing lists. The initial feedback was very minimal, so in September I put together a demo and made a presentation about it at Linaro Connect in San Francisco. The demo setup consisted of two Dragonboard 410c boards, wired with ARM energy probes to measure their power consumption in real time while running different use-cases (video playback, idle, etc). One of the boards was scaling the interconnects dynamically and the other one was not. The board with the scaling showed up to 26% lower power consumption in some use-cases. People were interested and we had a nice discussion with kernel developers from different companies. It turned out that some SoC vendors were also working in this area, although they were far behind compared with what Qualcomm already had. I was getting more confident that we should find a common solution. On the other hand, many people expressed concerns that supporting such functionality would be very difficult to implement, as it would require significant changes to many existing kernel frameworks. After discussing the pros and cons of reusing and extending the existing infrastructure with the Linux community at the Linaro Connect conference, a completely new framework seemed to be the best option. + +## Scaling Interconnect Bus + +Then in April 2018, Vincent Guittot and I made a presentation (Scaling Interconnect bus) at the [OSPM Summit](https://lwn.net/Articles/754923/). Explaining the idea to other kernel experts and maintainers with more details and example use-cases helped considerably to get a clearer picture. This was a common problem, which was solved differently in the SoC vendor kernels - often by introducing hacks and abusing existing frameworks. There was definitely a need for a common solution. I proposed a new API and the feedback was very positive, with people agreeing that such functionality deserves a new framework. However, we soon had other problems to tackle. The first one was adding bandwidth support to OPP for consumers that can’t determine their own bandwidth needs. The second one was to allow shared paths to be used by CPUs and DSPs coexisting in the same SoC. + +During the next few months, I continued updating the patches, and in September I made another presentation at Linaro Connect (On-chip interconnect API).
Then in November, Vincent Guittot kindly offered to make the same presentation at the Linux Plumbers Conference. + +As the patches continued to mature, I contacted a few developers from different SoC vendors to make them aware of this work and to get some confirmation that the framework would work for them too. The patches have been included in some of the CI builds at Linaro and also in linux-next to get wider test coverage. Alexandre Bailon posted a driver for the i.MX 7ULP platform on the mailing list, so we now had provider drivers for more than just Qualcomm platforms. In parallel, I posted a few examples of how the new API should be used by consumers. This helped people to start using it in the Qualcomm drivers they post upstream. + +## Kernel v5.1 + +After a few more iterations, v13 of the framework was finally merged in January 2019 for kernel v5.1, including support for some of the Qualcomm platforms. Then more and more features were added, like bandwidth support in OPP tables. A big problem was also keeping the initial interconnect configuration (done by bootloaders), which we did not want to change until all drivers had probed and expressed their bandwidth needs. Otherwise a path might get disabled before the user had a chance to request the amount of bandwidth it requires. We solved it by extending the sync\_state support that was added in the driver core by Saravana Kannan. Meanwhile, patches to add initial support for Exynos and Tegra have been posted and drivers from different subsystems started to use the framework - CPU, GPU, display, I2C, UART, SPI, USB, MMC, hardware accelerators, video decoders etc. + +## Platform Support + +Linux v5.11 will support multiple platforms from four different vendors. There are 7 Qualcomm platforms supported upstream and the company is switching to the upstream interconnect framework for its new products, which will be based on the recently announced Snapdragon 888 platform. The i.MX8 series application processors by NXP have been supported since Linux v5.8, and now Samsung’s Exynos and Nvidia’s Tegra are gaining initial support. Drivers for Mediatek platforms are under review on the mailing lists. I am very happy to see that more companies are joining the party and making use of the Interconnect framework to solve their problems. And that’s what Linaro is all about - leading hardware and software companies collaborating on open-source software for Arm platforms. + +Many thanks to the people who participated in the initial discussions or provided feedback during the development: +Vincent Guittot, Saravana Kannan, Sean Sweeney, David Dai, Mike Turquette, Kevin Hilman, Alexandre Bailon, Bjorn Andersson, Daniel Lezcano, Ulf Hansson, Rob Herring, Rafael Wysocki, Evan Green, Viresh Kumar, Greg Kroah-Hartman and others. + +For more information on Linaro and the work we do, do not hesitate to [get in touch](https://www.linaro.org/contact/).
diff --git a/src/content/blogs/how-linaro-builds-boots-and-tests-over-a-million-linux-kernels-per-year.mdx b/src/content/blogs/how-linaro-builds-boots-and-tests-over-a-million-linux-kernels-per-year.mdx new file mode 100644 index 0000000..6666dfa --- /dev/null +++ b/src/content/blogs/how-linaro-builds-boots-and-tests-over-a-million-linux-kernels-per-year.mdx @@ -0,0 +1,127 @@ +--- +title: How and why Linaro builds, boots and tests over a million Linux kernels + per year +description: > + In 2021, Linaro addressed an increase in Linux kernel release candidates + whilst also detecting and reporting more than double the amount of + regressions, compared to the previous year. In this blog, Engineering Manager + Benjamin Copeland talks about why we are building over a million kernels and + why this matters for overall Linux Kernel Quality. +date: 2022-02-08T02:14:57.000Z +image: linaro-website/images/blog/code_banner +tags: + - linux-kernel + - testing + - ci +author: ben-copeland +related: [] + +--- + +## Introduction + +In the past year, Linaro has addressed an increase in Linux kernel release candidates whilst also detecting and reporting more than double the number of regressions, compared to the previous year. + +Linaro’s Linux Kernel Functional Testing ([LKFT](https://lkft.linaro.org/)) has dealt with these Release Candidates (RCs) within a 48-hour [SLA](https://en.wikipedia.org/wiki/Service-level_agreement) (Service Level Agreement). This is no small feat given we have been able to build, boot and test more than a million kernels. These numbers are even more impressive when you take into account that LKFT has achieved all this without extra staffing. + +## What is LKFT (Linux Kernel Functional Testing)? + +Our goal is to “Improve the Linux kernel quality on the Arm architecture by performing regression testing and reporting on selected Linux kernel branches and the Android Common Kernel (ACK) in real time.” +To achieve this, LKFT provides a testing framework which builds, boots and tests the Linux kernel. This is the core of LKFT: we have built it into a framework which allows us and other users to easily plug into kernel testing, primarily on the Arm architecture. + +![class=medium-inline left Linux Kernel Functional Test Image](/linaro-website/images/blog/linux-kernel-functional-test-image) + +What does real time mean in this context? It means we are committed to the Linux community (through an SLA) to report regressions to kernel maintainers within 48 hours of changes being pushed to Linux kernel branches. Our core mission is to report back regressions on Long-Term Support (LTS) branches, but also on stable and upstream development branches (next/mainline) where we can. We have five full-time engineers working across multiple time zones, 24/7, 365 days a year to make sure we reach our SLA target, and due to the effort of our team we have not missed an SLA. We do this across multiple hardware platforms, however we focus mainly on Arm. You can see [the list of boards tested on the LKFT website](https://lkft.linaro.org/boards/). + +## Looking into the numbers… + +2021 was by no means a quiet year! We worked on 524 [LTS](https://www.kernel.org/category/releases.html) Release Candidates (RCs). This was an increase of 33% over 2020, where we saw 393 RCs released. This increase didn't just emerge from thin air. It is partly because LKFT has been reporting problems back to stable maintainer Greg Kroah-Hartman, which in turn helps Greg to iterate faster.
Of that increase, we reported 94 regressions, which is a 113% increase from the 44 regressions reported in 2020. + +![class=medium-inline right Regressions detected by LKFT in 2021](/linaro-website/images/blog/regressions-detected-by-lkft-in-2021) + +"While Google is a great help to me in the LTS effort, providing huge amounts of resources to make my life easier with this (i.e. funding Linaro's testing efforts), their promise to their customers/users does not depend on me keeping LTS kernels alive, if I stopped tomorrow their contracts are still in place and they know how to do this work themselves (as is proof with 3.18)." [Greg K H](https://lore.kernel.org/lkml/YBBkplRxzzmPYKC+@kroah.com/) + +The LKFT workload increased by 33%, while the regressions we reported increased by 113%. + +![class=medium-inline left Tests executed by LKFT in 2021](/linaro-website/images/blog/tests-executed-by-lkft-in-2021) + +We built 1,203,113 kernel configurations by the end of 2021. This is up 760% from 139k in 2020. Additionally, we almost doubled our test execution count to 144,355,862. This is a 91% increase from 75,622,248 in 2020. +These numbers are impressive, and we will go into more detail further down the blog post explaining the increase. + +What is interesting here is that we built only slightly more LTS kernels (3,558 in 2021 versus 3,187 in 2020), however from those 3,558 Git pushes we ran 90% (68,733,614) more tests. + +## Android Common Kernel (ACK) + +In the Android space we cover all Android userspace and kernel versions of interest. In 2021, these stood as Android 8-mr1, Q, R, S, T and aosp/master for the userspace; and the kernels were various Android branches for 4.4 / 4.9 / 4.14 / 4.19 / 5.4 / 5.10 / 5.15 and mainline. We run these kernel builds on Devices-Under-Test ([DUTs](https://en.wikipedia.org/wiki/Device_under_test)); these include DB845c, Hikey, Hikey960 and X15. + +We run a wide range of test suites including the main Android test suites, the Compatibility Test Suite ([CTS](https://source.android.com/compatibility/cts)) and the Vendor Test Suite ([VTS](https://source.android.com/compatibility/vts)). We also do testing beyond the scope of CTS/VTS, including benchmark tests like boottime, antutu, benchmarkpi, quadrantpro and vellamo3, in order to cover regressions as much as possible. + +We have a total of 69 kernel + userspace + DUT combinations (e.g. android12-5.4/Android12/HiKey960) that we test. We added 19 (a 33% increase) additional combinations in 2021. + +Also in 2021, we ran about 747 million tests across these 4 DUTs, sending out > 400 test reports for \~600 kernels across 1200+ combinations. Compared to 500M tests in 2020, this is approximately a 50% increase in test cases run. + +## Why are we building over a million kernels? + +In 2021 we added GCC-11 as well as Clang-12, 13 and Clang-nightly. We now build with GCC 8, 9, 10 and 11 plus Clang 10, 11, 12, 13 and clang-nightly. + +We have added 64K pages, KASAN, Debug, Kunit and armv8-features, compat, allmodconfig as additional kernel configs. + +## Why does this matter? + +The more toolchains and kernel configurations we can build, boot and test, the better chance we have of reporting regressions back to upstream communities. + +We run a series of [test suites](https://lkft.linaro.org/tests/) (e.g. LTP, kselftest, perf, Libhugetlbfs, KVM unit tests, S Suite and kunit) to name a few. We run these test suites with our built kernels on our [DUTs](https://lkft.linaro.org/boards/) and then report back.
This directly improves the quality of kernels before they hit in-field devices. + +## How has LKFT been able to build more Linux kernels? + +LKFT uses [TuxSuite](https://tuxsuite.com/) as the engine for enabling expansive Linux kernel builds. + +TuxSuite provides cloud-based Linux kernel build and testing services. The mission of TuxSuite is to provide “on-demand APIs and tools for building Linux kernels in parallel and at scale.” Hence we are able to build an unlimited number of kernel configurations in a parallel manner. How have we been able to do this though? + +In the past we built kernels on our Linaro [Jenkins](https://ci.linaro.org/) using bare metal servers, however as you can imagine this has limiting factors. It has proven difficult to build in parallel (or provide enough builders on demand to meet our needs without paying for idle servers). So, in LKFT we migrated from Jenkins to GitLab pipelines (kernel source mirrors); this, combined with the parallel building of TuxSuite, is the reason we have been able to increase our kernels with additional configurations and toolchains by 760%. + +We use GitLab kernel source mirrors to trigger our pipelines, which coordinate the testing process between all of our services in LKFT. A quick overview of our system is that we use GitLab pipelines to trigger our LKFT framework (building/booting/testing/reporting). + +We do this using our own autoscaling infrastructure, much like how TuxSuite builds out its parallel system; ours uses GitLab autoscaling with self-hosted runners to trigger Tuxbuild jobs. So when we have a large push, the combination of our own infrastructure and Tuxbuild gives us the ability to build an `unlimited` number of kernels. This has been paramount in achieving our increase. + +## What else has improved? + +We are always working with our test-suite communities, like LTP. We have recently started testing LTP development branches against the most recent stable kernel, and reporting back. + +We have also added automated build bisection, which allows us to identify which commit has caused a build regression. This is done inside the LKFT framework, alongside using git-bisect. This has proven very useful for the team, since this was previously a manual process. We have many more improvements we want to make to bisection, but that is a blog post in itself! However, this is a great start and has proven useful for our team. + +## What are we focusing on in 2022? + +![LKFT Roadmap for 2022](/linaro-website/images/blog/lkft-roadmap-2022) + +We will continue working on our core mission (reporting regressions on Linux stable RCs) but we strive to build, test and improve Linux testing on the Arm architecture as much as possible. A key aspect of our roadmap is to increase the throughput of our engineers through improvements in tooling (for example TuxSuite and GitLab pipelines). We also work closely with the LTP/kselftest communities to improve testing and reporting to the Linux kernel community. + +## Testing and Emulation + +Hardware is expensive to purchase and maintain. We are always looking at ways of increasing our testing capacity, but this is not always possible when you have limited hardware. Over the years, [QEMU](https://www.qemu.org/) has proven a very useful technology for LKFT. QEMU allows us to virtualise our hardware testing environment in a reliable way, which means we can leverage QEMU to improve test coverage.
+ +Currently we are running QEMU on bare metal from our LAVA lab, but we are limited in how many QEMU targets we can run. This is due to the fact that physical machines running QEMU can only run so many instances in parallel. This is similar to the problem described at the start of the blog post with regards to kernel building. + +TuxSuite has an open source tool, [tuxrun](https://tuxrun.org/), which works similarly to [tuxmake](https://tuxmake.org/). Using 'tuxsuite test' allows the user to scale to an `unlimited` number of QEMU devices in the cloud. + +The roadmap for LKFT here is to integrate tuxrun into the LKFT framework. Both projects have some alignment work to do to get the full LKFT test suite working, and we are already working on that. Throughout the year we want to improve our QEMU testing, for example by supporting [FVP](https://developer.arm.com/tools-and-software/simulation-models/fixed-virtual-platforms) (Fixed Virtual Platforms), running different QEMU versions and booting different architectures (not just x86/Arm). + +This will benefit us greatly, as it will allow us to increase our testing throughput and to test on multiple platforms. + +## Benchmark performance regression + +Performance testing will allow us to identify performance regressions in the Linux kernel. These types of regressions currently go undetected, and we have set about changing that, especially for Arm. + +Paolo Valente has worked closely with LKFT to get a benchmarking framework in place. Paolo has posted a couple of blog posts [here](https://www.linaro.org/blog/automatic-detection-and-reporting-of-performance-regressions/) and [here](https://www.linaro.org/blog/ensuring-optimal-performance-through-enhanced-kernel-testing/). He describes some of the areas and decisions that led to using mmtests as a benchmark testing framework. + +Paolo and his students have done fantastic work, providing us with the building blocks for getting mmtests working on Arm and decoupling the framework into a workable test-suite. + +The work set out on our roadmap is to continue the work that Paolo and his students did and implement it in our LKFT framework. We have many moving components to get this working. Namely, we need to build a rootfs, add more benchmark tests, integrate into a pipeline and test how it runs on hardware. This will allow us to produce a baseline, which will then allow us to report benchmarking regressions. Of course there will be many issues along the way, especially finding the hardware, but we will endeavour to make this a goal for 2022. + +## Closing notes + +This is by no means a complete list of 2022 work (more can be seen in our roadmap), but these are some of the focus areas we will be working on in 2022. Of course the team will be focusing on the core mission of real-time reporting of Release Candidates. We will, as always, work closely with the upstream Linux kernel and test suite communities and keep on reporting those pesky regressions. + +I would like to thank everyone in LKFT for the hard work and dedication that has gone into allowing us to achieve these numbers and achieve more each year. + +For more information on the work we do on Linux Kernel Functional Testing, check out our [Linux Kernel Quality Project Page](https://linaro.atlassian.net/wiki/spaces/LKQ/overview).
diff --git a/src/content/blogs/how-to-build-flang-on-windows-on-arm.mdx b/src/content/blogs/how-to-build-flang-on-windows-on-arm.mdx new file mode 100644 index 0000000..e47c600 --- /dev/null +++ b/src/content/blogs/how-to-build-flang-on-windows-on-arm.mdx @@ -0,0 +1,90 @@ +--- +title: How to build flang on Windows on Arm +description: "In this blog post we look at how to build flang on Windows on Arm. " +date: 2022-03-01T09:38:26.000Z +image: linaro-website/images/blog/llvm-image +tags: + - windows-on-arm + - open-source +author: diana-picus +related: [] + +--- + +Last week we published a blog on [how to set up Windows on Arm for LLVM development](https://www.linaro.org/blog/how-to-set-up-windows-on-arm-for-llvm-development/). In this blog we provide a step-by-step guide on how to build Flang on Windows on Arm. + +## What is Flang? + +Flang is a Fortran front-end developed as part of the LLVM project. It is still a young project and Linaro is contributing to its growth on both Linux and Windows on Arm. + +If you need to build Fortran code on Windows on Arm, there really isn't any battle-tested native open source compiler to rely on. We are hoping to help flang fill that void. + +Naturally, the first step is to be able to compile flang itself on Windows on Arm, and this is exactly what this blog post will cover. + +## How to build Flang + +In order to get a build of flang, you will first need to set up your machine for LLVM development as described in [our previous blog post](https://www.linaro.org/blog/how-to-set-up-windows-on-arm-for-llvm-development/) mentioned above. + +You will have to add flang, clang and mlir to the list of enabled projects. + +You will also need to tell clang where to find the builtins, otherwise the linker will complain that it cannot find symbols such as \_udivdi3 or others. Adding the path to the builtin library as described in the [Clang Compiler User’s Manual — Clang 13 documentation](https://clang.llvm.org/docs/UsersManual.html#finding-clang-runtime-libraries) doesn’t seem to be working at the moment (bug report pending). Luckily, we can work around this issue by adding the builtin library directly on every link command (`-DCMAKE_*_LINKER_FLAGS=path/to/clang_rt.builtins-aarch64.lib`; if you’re lucky you’ll only need `CMAKE_EXE_LINKER_FLAGS`, but depending on the compiler version you’re using and your other build flags, you might also need to set `CMAKE_SHARED_LINKER_FLAGS` or `CMAKE_STATIC_LINKER_FLAGS`). + +You should end up with a script that looks something like [this](https://github.com/rovka/f18-llvm-project/blob/flang-woa/flang/examples/build_flang.bat): + +``` +REM You need to modify the paths below: +set build_dir=path\to\where\you\want\the\build (must already exist) +set clang_root=path\to\where\clang\is\installed +set clang_version=x.y.z (should match what’s in %clang_root%) + +REM Some helper variables. +REM Setting CMAKE_CL_SHOWINCLUDES_PREFIX to work around PR27226.
+set cmake_flags=^ + -DCMAKE_BUILD_TYPE=Release ^ + -DLLVM_ENABLE_ASSERTIONS=ON ^ + -DLLVM_INSTALL_TOOLCHAIN_ONLY=ON ^ + -DLLVM_BUILD_LLVM_C_DYLIB=ON ^ + -DCMAKE_INSTALL_UCRT_LIBRARIES=ON ^ + -DCMAKE_CL_SHOWINCLUDES_PREFIX="Note: including file: " ^ + -DLLVM_DEFAULT_TARGET_TRIPLE=aarch64-unknown-windows-msvc ^ + -DLLVM_HOST_TRIPLE=aarch64-unknown-windows-msvc ^ + -DLLVM_TARGET_ARCH=AArch64 ^ + -DCLANG_DEFAULT_LINKER=lld + +cd %build_dir% + +set clang_path=%clang_root%\bin\clang-cl.exe +set builtins_path=%clang_root%\lib\clang\%clang_version%\lib\windows +set builtins_lib=clang_rt.builtins-aarch64.lib + +set CC=%clang_path% +set CXX=%clang_path% + +REM We enable clang because it is needed by the flang driver. +cmake -GNinja %cmake_flags% ^ + -DLLVM_ENABLE_PROJECTS="clang;flang;mlir" ^ + -DLLVM_TARGETS_TO_BUILD="AArch64" ^ + -DCMAKE_C_FLAGS="-fms-compatibility-version=19.20" ^ + -DCMAKE_CXX_FLAGS="-fms-compatibility-version=19.20" ^ + -DCMAKE_EXE_LINKER_FLAGS="%builtins_path%/%builtins_lib%" ^ + ..\llvm-project\llvm || exit /b + +ninja all || ninja all || ninja all || exit /b +``` + +Depending on which revision of flang you’re building, you should now have one or both of `f18.exe` or `flang-new.exe` in `%build_dir%/bin`. + +At the time of writing, clang version [13.0.0](https://www.google.com/url?q=https://github.com/llvm/llvm-project/releases/download/llvmorg-13.0.0/LLVM-13.0.0-woa64.zip\&sa=D\&source=docs\&ust=1646131480669885\&usg=AOvVaw1ILobDb_KMB4Hxid6JOAZa) is known to work for building flang without code generation support. +However, if you want to build a flang that can generate actual binaries, you need to use custom development branches. These are based on the community’s [fir-dev branch](https://github.com/flang-compiler/f18-llvm-project/tree/fir-dev), which is currently in the process of being upstreamed, plus some minor changes to get things working on Windows on Arm. + +You can use the steps described above to build even this version of flang, and the resulting executable should be able to compile a simple ‘hello world’ (to link it with the builtins, pass `-Xlinker /path/to/clang_rt.builtins-aarch64.lib` to flang, just as you did earlier to compile flang itself). + +## Conclusion + +We are naturally working hard towards getting upstream flang to work on Windows on Arm as smoothly as possible. Current progress is tracked in [Jira](https://linaro.atlassian.net/browse/WOA-47). If you would like to get involved, the best places to get in touch with the upstream flang community (or just to keep an eye on the status quo) are [the slack workspace](https://github.com/llvm/llvm-project/blob/main/flang/docs/GettingInvolved.md#chat) and [the community calls](https://github.com/llvm/llvm-project/blob/main/flang/docs/GettingInvolved.md#calls). Stay tuned for an upcoming blog on how to compile and use the LLDB debugger on Windows on Arm laptops. + +Linaro is working with Arm, Microsoft and Qualcomm to enable open source packages to run natively on Windows on Arm. For more information about this project, click [here](https://linaro.atlassian.net/wiki/spaces/WOAR/overview).
diff --git a/src/content/blogs/how-to-emulate-trusted-platform-module-in-qemu-with-u-boot.mdx b/src/content/blogs/how-to-emulate-trusted-platform-module-in-qemu-with-u-boot.mdx new file mode 100644 index 0000000..dfd2736 --- /dev/null +++ b/src/content/blogs/how-to-emulate-trusted-platform-module-in-qemu-with-u-boot.mdx @@ -0,0 +1,404 @@ +--- +title: How to emulate Trusted Platform module in QEMU with U-Boot +description: In this blog, Linaro Tech Lead Ilias Apalodimas provides a step by + step guide to emulating Trusted Platform module in QEMU with U-Boot. Read more + here! +date: 2022-01-06T11:08:44.000Z +image: linaro-website/images/blog/Trusted_Services2 +tags: + - u-boot + - qemu +author: ilias-apalodimas +related: [] + +--- + +## Do I need a Trusted Platform Module (TPM)? + +The short answer is "yes you do". + +TPMs are microcontrollers designed for cryptographic tasks. Don’t think of them as crypto accelerators though, since carrying out cryptographic operations on your CPU will always be faster. However they can encrypt and decrypt information and have a significant advantage over your CPU doing so, since the keys are tied to the TPM. +When the TPM is initially configured, it generates a Storage Root Key or SRK. You can then ask the TPM to generate a new keypair for you, which the TPM will encrypt using the SRK, and hand it over to the caller. When the OS needs to encrypt or decrypt something, it loads the key into the TPM. The TPM then decrypts the key and performs the requested operation. + +Another interesting functionality of the TPM is the ability to measure the system state using Platform Configuration Registers or PCRs, combined with key ‘sealing and unsealing’. + +PCRs start zeroed out and can only be reset on a system reboot. They can be extended by writing a SHA hash (typically SHA-1/256/384/512 for TPMv2) into the PCR. The TPM concatenates the new hash to the existing PCR value and another SHA is calculated. This new value is now stored in the PCR. + +TPMs also have the ability to seal and unseal keys. They can create and secure keys bound to specific platform states (and thus measurements). In order to unseal the key, the PCRs have to contain the exact same values that they had when the key was sealed. So for example you can create an encrypted filesystem with the encryption key sealed safely into your TPM. The filesystem will only be decrypted if the TPM ends up in the expected state. + +In typical measured boot scenarios the firmware (or even BL1 in some devices) hashes itself into specific PCRs. So you can have for example BL2 measure itself and then measure BL31, BL32, BL33 and even GRUB config files, commands, the Linux initrd etc. This effectively creates a chain of trust, which guarantees that the system components we choose to measure have not been modified. Changing any of these would result in a different set of hash values. + +## U-Boot Support + +There is a specification defining a [standard](https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf) interface to the TPM on a UEFI platform. Its purpose is to define APIs and provide information for things like whether a TPM is present, which PCR banks are active, changing the active PCR banks, obtaining the TCG boot log, extending hashes to PCRs, appending events to the TCG boot log etc.
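+
+As an aside, the PCR extend operation described above is simple enough to model in a few lines of C. The snippet below is a hypothetical illustration only (it is not part of this guide and does not talk to a real TPM): it shows how a SHA-256 PCR bank evolves as measurements are folded in, i.e. new_pcr = SHA256(old_pcr || digest), using OpenSSL's SHA256() for the hashing.
+
+```
+/* Hypothetical illustration only: a software model of a SHA-256 PCR extend.
+ * Build with: gcc pcr_extend.c -lcrypto
+ */
+#include <stdio.h>
+#include <string.h>
+#include <openssl/sha.h>
+
+#define PCR_SIZE SHA256_DIGEST_LENGTH /* 32 bytes for a SHA-256 bank */
+
+static void pcr_extend(unsigned char pcr[PCR_SIZE], const unsigned char digest[PCR_SIZE])
+{
+	unsigned char buf[2 * PCR_SIZE];
+
+	memcpy(buf, pcr, PCR_SIZE);               /* current PCR value */
+	memcpy(buf + PCR_SIZE, digest, PCR_SIZE); /* new measurement   */
+	SHA256(buf, sizeof(buf), pcr);            /* PCR = SHA256(old || digest) */
+}
+
+int main(void)
+{
+	unsigned char pcr[PCR_SIZE] = { 0 };      /* PCRs start zeroed out */
+	unsigned char measurement[PCR_SIZE];
+
+	/* Pretend this is the hash of the next boot stage. */
+	SHA256((const unsigned char *)"BL33", 4, measurement);
+	pcr_extend(pcr, measurement);
+
+	for (int i = 0; i < PCR_SIZE; i++)
+		printf("%02x", pcr[i]);
+	printf("\n");
+	return 0;
+}
+```
+
+This extend-and-rehash step is what firmware performs for each component it measures, and it is what the EFI TCG2 protocol mentioned above lets bootloaders and UEFI applications do, while also recording each event in the TCG boot log.
+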
+U-Boot recently got support for this, as well as support for the [TCG PC Client Platform Firmware Profile](https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClient_PFP_r1p05_v22_02dec2020.pdf). +Patches for U-Boot were contributed by Linaro and can be found [here](https://lore.kernel.org/u-boot/20201112222210.876652-1-ilias.apalodimas@linaro.org/), [here](https://lore.kernel.org/u-boot/20201127162932.1965323-1-ilias.apalodimas@linaro.org/) and [here](https://lore.kernel.org/u-boot/20210813071243.18885-1-masahisa.kojima@linaro.org/). +It can be tricky to find an Arm device with a TPMv2. If you have a board with an RPMB and OP-TEE support, we recommend trying Microsoft's [fTPM](https://github.com/microsoft/ms-tpm-20-ref/). However testing that in QEMU won't work since it lacks RPMB emulation. Luckily there is another solution. + +## Using SWTPM + +[SWTPM](https://github.com/stefanberger/swtpm) is a TPM emulator that works under QEMU. It provides a memory mapped device which adheres to the [TCG TPM Interface Specification](https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClientTPMInterfaceSpecification_TIS__1-3_27_03212013.pdf). U-Boot lacked an MMIO TPMv2 driver up until [this patchset](https://source.denx.de/u-boot/u-boot/-/commit/e0ff3489974415873426188c71c613d2d28de6e3). + +## Building U-Boot + +``` +git clone https://github.com/u-boot/u-boot.git +pushd u-boot +make qemu_arm64_defconfig +make menuconfig +``` + +The qemu defconfig includes the needed CONFIG\_TPM, CONFIG\_TPM2\_MMIO and CONFIG\_EFI\_TCG2\_PROTOCOL options. Make sure you enable CONFIG\_CMD\_EFIDEBUG as well, since we will need it to boot our kernel. + +``` +make -j $(nproc) +popd +``` + +## Running QEMU + +Make sure swtpm is installed and running on your system. For Debian and friends there's a swtpm package, so just do + +``` +sudo apt install swtpm +mkdir /tmp/mytpm1 +swtpm socket --tpmstate dir=/tmp/mytpm1 \ + --ctrl type=unixio,path=/tmp/mytpm1/swtpm-sock \ + --log level=40 --tpm2 -t -d +``` + +and launch QEMU with swtpm support + +``` +qemu-system-aarch64 -nographic -no-acpi \ +-bios u-boot.bin -machine virt \ +-cpu cortex-a57 -m 2G \ +-drive if=virtio,file= \ +-chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock \ -tpmdev emulator,id=tpm0,chardev=chrtpm \ +-device tpm-tis-device,tpmdev=tpm0 +``` + +## Booting linux + +From U-Boot's command line do something along the lines of + +``` +virtio scan +efidebug boot add -b 0 'Linux' virtio 0 boot/Image -s 'root=/dev/vda' +efidebug boot order 0 +bootefi bootmgr +``` + +If everything is compiled and launched correctly, you should see the kernel reporting the location of some related EventLog pointers. + +``` +efi: EFI v2.80 by Das U-Boot +efi: TPMFinalLog=0x13ddcc040 RTPROP=0x13ddcb040 SMBIOS=0xffffe000 +TPMEventLog=0x13ddc4040 MEMRESERVE=0x13ddc3040 +``` + +## Reading the EventLog + +I am using a debian qcow2 image, where I have installed the latest tpm2 tools. If you don't have them install them with + +``` +sudo apt install tpm2-tools +``` + +The kernel exposes the eventlog in /sys. 
So you can read it with: + +``` +tpm2_eventlog /sys/kernel/security/tpm0/binary_bios_measurements +--- +events: + - EventNum: 0 + PCRIndex: 0 + EventType: EV_NO_ACTION + Digest: "0000000000000000000000000000000000000000" + EventSize: 37 + SpecID: + - Signature: Spec ID Event03 + platformClass: 0 + specVersionMinor: 0 + specVersionMajor: 2 + specErrata: 2 + uintnSize: 2 + numberOfAlgorithms: 2 + Algorithms: + - Algorithm[0]: + algorithmId: sha1 + digestSize: 20 + - Algorithm[1]: + algorithmId: sha256 + digestSize: 32 + vendorInfoSize: 0 + - EventNum: 1 + PCRIndex: 0 + EventType: EV_S_CRTM_VERSION + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "0772fd675fbebcdd4401008ee8d609760c1675df" + - AlgorithmId: sha256 + Digest: "69f66450f9a8780cf2fbab358d46b8fabd4b7e9ae886b3d80083646c30e91b4c" + EventSize: 74 + Event: "552d426f6f7420323032312e31302d7263322d30303031302d67643536666231666138352d646972747920284175672032342032303231202d2030363a33343a3335202b303830302900" + - EventNum: 2 + PCRIndex: 7 + EventType: EV_EFI_VARIABLE_DRIVER_CONFIG + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "57cd4dc19442475aa82743484f3b1caa88e142b8" + - AlgorithmId: sha256 + Digest: "115aa827dbccfb44d216ad9ecfda56bdea620b860a94bed5b7a27bba1c4d02d8" + EventSize: 53 + Event: + VariableName: 61dfe48b-ca93-d211-aa0d-00e098032b8c + UnicodeNameLength: 10 + VariableDataLength: 1 + UnicodeName: SecureBoot + VariableData: "00" + - EventNum: 3 + PCRIndex: 7 + EventType: EV_EFI_VARIABLE_DRIVER_CONFIG + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9b1387306ebb7ff8e795e7be77563666bbf4516e" + - AlgorithmId: sha256 + Digest: "dea7b80ab53a3daaa24d5cc46c64e1fa9ffd03739f90aadbd8c0867c4a5b4890" + EventSize: 36 + Event: + VariableName: 61dfe48b-ca93-d211-aa0d-00e098032b8c + UnicodeNameLength: 2 + VariableDataLength: 0 + UnicodeName: PK + - EventNum: 4 + PCRIndex: 7 + EventType: EV_EFI_VARIABLE_DRIVER_CONFIG + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9afa86c507419b8570c62167cb9486d9fc809758" + - AlgorithmId: sha256 + Digest: "e670e121fcebd473b8bc41bb801301fc1d9afa33904f06f7149b74f12c47a68f" + EventSize: 38 + Event: + VariableName: 61dfe48b-ca93-d211-aa0d-00e098032b8c + UnicodeNameLength: 3 + VariableDataLength: 0 + UnicodeName: KEK + - EventNum: 5 + PCRIndex: 7 + EventType: EV_EFI_VARIABLE_DRIVER_CONFIG + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "5bf8faa078d40ffbd03317c93398b01229a0e1e0" + - AlgorithmId: sha256 + Digest: "baf89a3ccace52750c5f0128351e0422a41597a1adfd50822aa363b9d124ea7c" + EventSize: 36 + Event: + VariableName: cbb219d7-3a3d-9645-a3bc-dad00e67656f + UnicodeNameLength: 2 + VariableDataLength: 0 + UnicodeName: db + - EventNum: 6 + PCRIndex: 7 + EventType: EV_EFI_VARIABLE_DRIVER_CONFIG + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "734424c9fe8fc71716c42096f4b74c88733b175e" + - AlgorithmId: sha256 + Digest: "9f75b6823bff6af1024a4e2036719cdd548d3cbc2bf1de8e7ef4d0ed01f94bf9" + EventSize: 38 + Event: + VariableName: cbb219d7-3a3d-9645-a3bc-dad00e67656f + UnicodeNameLength: 3 + VariableDataLength: 0 + UnicodeName: dbx + - EventNum: 7 + PCRIndex: 4 + EventType: EV_EFI_BOOT_SERVICES_APPLICATION + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "d2702383b2e042ebefbb318f9382fd094c1c2d6b" + - AlgorithmId: sha256 + Digest: "8fb6a4c7a0c7e7f22b944906eb07786cd3860394c7929e7c0768b2c99a2a3d87" + EventSize: 162 + Event: + ImageLocationInMemory: 0x7adb3000 + ImageLengthInMemory: 893720 + ImageLinkTimeAddress: 0x0 + LengthOfDevicePath: 130 + DevicePath: 
"01041400b9731de684a3cc4aaeab82e828f3628b031d050002031d05000104012a0001000000009800000000000000f805000000000050641c65888b6b418f2257061a9dc3c50202040436005c004500460049005c00640065006200690061006e005c007300680069006d0061006100360034002e0065006600690000007fff0400" + - EventNum: 8 + PCRIndex: 1 + EventType: Unknown event type + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "50eb6bd100c48e81644d666a437725f49c6aed3c" + - AlgorithmId: sha256 + Digest: "eb881b78feeb95756141a8d5358b891b297fad61b296f667de1f59b66bc92f4f" + EventSize: 52 + Event: "61dfe48bca93d211aa0d00e098032b8c0900000000000000020000000000000042006f006f0074004f0072006400650072000000" + - EventNum: 9 + PCRIndex: 1 + EventType: Unknown event type + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "74e29a0674816dfdfc2c2e9cbbcec357132da4e8" + - AlgorithmId: sha256 + Digest: "5d123811f51e2e46c437c7e88e07053c81f4e63da3f11de8a8a2afb5334db137" + EventSize: 198 + Event: "61dfe48bca93d211aa0d00e098032b8c0800000000000000960000000000000042006f006f0074003000300030003000010000008200640065006200690061006e00000001041400b9731de684a3cc4aaeab82e828f3628b031d050002031d05000104012a0001000000009800000000000000f805000000000050641c65888b6b418f2257061a9dc3c50202040436005c004500460049005c00640065006200690061006e005c007300680069006d0061006100360034002e0065006600690000007fff0400" + - EventNum: 10 + PCRIndex: 4 + EventType: EV_EFI_ACTION + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "cd0fdb4531a6ec41be2753ba042637d6e5f7f256" + - AlgorithmId: sha256 + Digest: "3d6772b4f84ed47595d72a2c4c5ffd15f5bb72c7507fe26f2aaee2c69d5633ba" + EventSize: 40 + Event: 'Calling EFI Application from Boot Option' + - EventNum: 11 + PCRIndex: 0 + EventType: EV_SEPARATOR + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9069ca78e7450a285173431b3e52c5c25299e473" + - AlgorithmId: sha256 + Digest: "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119" + EventSize: 4 + Event: "00000000" + - EventNum: 12 + PCRIndex: 1 + EventType: EV_SEPARATOR + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9069ca78e7450a285173431b3e52c5c25299e473" + - AlgorithmId: sha256 + Digest: "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119" + EventSize: 4 + Event: "00000000" + - EventNum: 13 + PCRIndex: 2 + EventType: EV_SEPARATOR + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9069ca78e7450a285173431b3e52c5c25299e473" + - AlgorithmId: sha256 + Digest: "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119" + EventSize: 4 + Event: "00000000" + - EventNum: 14 + PCRIndex: 3 + EventType: EV_SEPARATOR + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9069ca78e7450a285173431b3e52c5c25299e473" + - AlgorithmId: sha256 + Digest: +"df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119" + EventSize: 4 + Event: "00000000" + - EventNum: 15 + PCRIndex: 4 + EventType: EV_SEPARATOR + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9069ca78e7450a285173431b3e52c5c25299e473" + - AlgorithmId: sha256 + Digest: "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119" + EventSize: 4 + Event: "00000000" + - EventNum: 16 + PCRIndex: 5 + EventType: EV_SEPARATOR + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9069ca78e7450a285173431b3e52c5c25299e473" + - AlgorithmId: sha256 + Digest: "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119" + EventSize: 4 + Event: "00000000" + - EventNum: 17 + PCRIndex: 6 + EventType: EV_SEPARATOR + DigestCount: 2 + Digests: + - 
AlgorithmId: sha1 + Digest: "9069ca78e7450a285173431b3e52c5c25299e473" + - AlgorithmId: sha256 + Digest: "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119" + EventSize: 4 + Event: "00000000" + - EventNum: 18 + PCRIndex: 7 + EventType: EV_SEPARATOR + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "9069ca78e7450a285173431b3e52c5c25299e473" + - AlgorithmId: sha256 + Digest: "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119" + EventSize: 4 + Event: "00000000" + - EventNum: 19 + PCRIndex: 5 + EventType: EV_EFI_ACTION + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "443a6b7b82b7af564f2e393cd9d5a388b7fa4a98" + - AlgorithmId: sha256 + Digest: "d8043d6b7b85ad358eb3b6ae6a873ab7ef23a26352c5dc4faa5aeedacf5eb41b" + EventSize: 29 + Event: 'Exit Boot Services Invocation' + - EventNum: 20 + PCRIndex: 5 + EventType: EV_EFI_ACTION + DigestCount: 2 + Digests: + - AlgorithmId: sha1 + Digest: "475545ddc978d7bfd036facc7e2e987f48189f0d" + - AlgorithmId: sha256 + Digest: "b54f7542cbd872a81a9d9dea839b2b8d747c7ebd5ea6615c40f42f44a6dbeba0" + EventSize: 40 + Event: 'Exit Boot Services Returned with Success' +pcrs: + sha1: + 0 : 0x3e26be54f5f15140afbe509cc4580538d979598d + 1 : 0x5b4c188c39baa249f688460a63b68df6d3d3ec94 + 2 : 0xb2a83b0ebf2f8374299a5b2bdfc31ea955ad7236 + 3 : 0xb2a83b0ebf2f8374299a5b2bdfc31ea955ad7236 + 4 : 0x260ae65533f38ab643f157bd176c72f9fdece410 + 5 : 0xd16d7e629fd8d08ca256f9ad3a3a1587c9e6cc1b + 6 : 0xb2a83b0ebf2f8374299a5b2bdfc31ea955ad7236 + 7 : 0x518bd167271fbb64589c61e43d8c0165861431d8 + sha256: + 0 : +0x7f35c1ef1bb7b9d2aee58ec4c36cf384d70524c2ce2b6801772d7fdb1d2b5f5a + 1 : 0xf35b74319598e48a6a69a6a04e903a872558b891563b0af23c877c8472c277a6 + 2 : 0x3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969 + 3 : 0x3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969 + 4 : 0xbe4d7464e3a3c0a04040355368006a5fbe02c0ef232c8c18926df9b718374f36 + 5 : 0xa5ceb755d043f32431d63e39f5161464620a3437280494b5850dc1b47cc074e0 + 6 : 0x3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969 + 7 : 0x65caf8dd1e0ea7a6347b635d2b379c93b9a1351edc2afc3ecda700e534eb3068 +``` + +For more information on the work Linaro does in securing edge devices, go to our [Trusted Substrate project page](https://linaro.atlassian.net/wiki/spaces/TS/overview). Trusted Substrate is an integrated firmware solution made of all necessary components to implement Arm SystemReady standards with more security options turned on. diff --git a/src/content/blogs/how-to-set-up-vs-code-for-llvm-development.mdx b/src/content/blogs/how-to-set-up-vs-code-for-llvm-development.mdx new file mode 100644 index 0000000..ad85c7d --- /dev/null +++ b/src/content/blogs/how-to-set-up-vs-code-for-llvm-development.mdx @@ -0,0 +1,86 @@ +--- +title: "How to set up VS Code for LLVM development " +description: > + In this blog, Senior Engineer Omair Javaid describes how to set up VS Code for + LLDB/LLVM development on remote Linux and Windows machines. More importantly + this how-to guide will help you set up native LLVM toolchain for C++ + development on Windows on Arm platform. +date: 2021-11-08T01:36:21.000Z +image: linaro-website/images/blog/code_highway-2- +tags: + - windows-on-arm + - open-source +author: omair-javaid +related: [] + +--- + +## Introduction + +Visual Studio Code is a highly configurable IDE and nearly all its settings and user interface can be modified according to most use-cases. 
It provides the ability to extend its features through third-party extensions which help add support for languages, debuggers, and various tools required for most development workflows. VS Code provides an extensive set of extensions for editing and debugging C++ applications. Most importantly, it can be used for LLVM development on remote machines, providing a close-to-local development experience.
+In the past, most of our LLVM development targeted Arm/Linux and the command line was mostly deemed enough, with occasional use of the Eclipse IDE, mainly for its C++ indexing. This changed when we started developing LLVM support for Windows on Arm and needed a native Windows IDE that could support remote LLVM development with relatively fast indexing, good Git integration, out-of-the-box terminal support and so on. We also wanted to enable integration of various LLVM tools with Visual Studio to facilitate developers using a Windows on Arm machine.
+
+This blog describes how to set up VS Code for LLDB/LLVM development on remote Linux and Windows machines. More importantly, this how-to will help you set up a native LLVM toolchain for C++ development on the Windows on Arm platform.
+
+![Visual Studio Image](/linaro-website/images/blog/visual-studio-image-1)
+
+## Installation setup
+
+The steps described in this document were tested on Ubuntu 18.04; however, they are equally applicable on any platform that VS Code supports. [Download and install the latest version of Visual Studio Code](https://code.visualstudio.com/download). For the LLVM development workflow, we will install the following additional VS Code extensions:
+
+* **ms-vscode.cpptools** Adds C/C++ language support
+* **ms-python.python** Adds Python language support
+* **ms-vscode.cmake-tools** Adds CMake-based configuration support
+* **xaver.clang-format** Adds clang-format support
+* **ms-vscode-remote.remote-ssh** Adds support to use any remote machine for development, providing a close-to-local development experience.
+
+Extensions can be installed in the following ways:
+
+* **From Windows CMD or Linux terminal** by using: `code --install-extension <extension-id>`
+* **From VS Code Quick Open** (Ctrl+P) and `ext install <extension-id>`
+* **From the VS Code marketplace** GUI (Ctrl + Shift + X) and search/install all required extensions.
+
+![Visual Studio Installation Set Up Image](/linaro-website/images/blog/visual-studio-installation-set-up)
+
+## Setting up remote connection
+
+VS Code enables you to open remote workspace folders from your local machine, allowing [a seamless development experience](https://code.visualstudio.com/docs/remote/ssh). We need a working SSH (Secure Shell) connection between the host and the [Windows](https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse) or Linux remote machines.
+
+If a working SSH connection has been set up and the SSH configuration has been written to .ssh/config, we can go ahead and connect to the remote workspace by opening VS Code Quick Open (Ctrl + Shift + P) and selecting the Remote-SSH: Connect to Host option.
+
+![Setting up remote connection image 1](/linaro-website/images/blog/setting-up-remote-connection-1)
+
+We will get a drop-down list of all the hosts configured in the .ssh/config file. Alternatively, VS Code also provides an “Add new SSH host” option which will take the remote SSH information from the user and write the appropriate SSH config for later use.
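+
+If you have not written one yet, a minimal .ssh/config entry for a remote development machine looks roughly like the sketch below (the host alias, address, user and key path are placeholders, not values from this guide):
+
+```
+Host woa-dev-box
+    HostName 192.0.2.10
+    User developer
+    IdentityFile ~/.ssh/id_ed25519
+```
+
+Hosts defined in this way show up directly in the Remote-SSH host drop-down.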
+ +![Setting up remote connection image 2](/linaro-website/images/blog/setting-up-remote-connection-2) + +## Setting up LLVM development + +In most use-cases we will open our project source code directory in VS Code which will serve as our [VS Code workspace](https://code.visualstudio.com/docs/editor/workspaces). VS Code workspace root directory contains a .vscode folder that hosts the user configured workspace specific settings for the current project. These settings include editor, compiler, debugger and various other use-case specific configuration files written in JSON or YAML. +For the purpose of LLVM development I have written configuration files required for configuring, building and running LLVM using cmake, compiler settings for various target platforms and debugger configurations. These configurations can be found in .vscode in [this github repository](https://github.com/omjavaid/llvm-dev/). Download the .vscode folder containing LLVM JSON configs and include it into your llvm-project root directory. A brief description on working of each of these files below: + +* [cmake-kits.json](https://github.com/omjavaid/llvm-dev/blob/master/.vscode/cmake-kits.json) [A cmake-kit file](https://vector-of-bool.github.io/docs/vscode-cmake-tools/kits.html) allows for writing project specific compiler configurations for various use cases. +* [cmake-variants.json](https://github.com/omjavaid/llvm-dev/blob/master/.vscode/cmake-variants.json) [cmake-variants file](https://vector-of-bool.github.io/docs/vscode-cmake-tools/variants.html) contains a set of project specific build configurations called variants. These are mostly environment variables and configuration flags passed to cmake. +* [launch.json](https://github.com/omjavaid/llvm-dev/blob/master/.vscode/launch.json) Contains [debug configuration information](https://code.visualstudio.com/docs/editor/debugging) for the current project. +* [settings.json](https://github.com/omjavaid/llvm-dev/blob/master/.vscode/settings.json) is a project specific settings file used to configure VS code and its installed extensions. It may contain settings like build folder path, source code path etc. It also contains editor settings like tabs width, color schemes, themes etc. +* [tasks.json](https://github.com/omjavaid/llvm-dev/blob/master/.vscode/tasks.json) contains a set of instructions to automate various use-cases like build, run and test. Tasks are like scripts that can run commands and do sequential steps. +* [clang-aarch64-linux.cmake](https://github.com/omjavaid/llvm-dev/blob/master/.vscode/clang-aarch64-linux.cmake) Adds cmake toolchain file for supporting cross compilation for AArch64/Linux targets on non-native Linux hosts. +* [clang-armhf-linux.cmake](https://github.com/omjavaid/llvm-dev/blob/master/.vscode/clang-armhf-linux.cmake) Adds cmake toolchain file for supporting cross compilation for Arm/Linux targets on non-native Linux hosts. + +These configuration files are still under development and will keep on evolving as we configure VS code to adapt to various use-cases of LLVM development. +Steps below describe remote LLVM development using VS Code on a remote WoA/Linux machine. + +1. Download and install relevant VS Code extensions as described above. +2. [Download this .vscode folder](https://github.com/omjavaid/llvm-dev/) and copy it into your workspace directory. +3. Clone llvm-project repository in your workspace folder. +4. 
Launch VS code by running: `code ` from terminal/cmd + +cmake-variants.json describes various combinations of release/debug build configurations to select from. We have written our platform custom variant configurations to cater for Linux host, WoA hosts and cross compile for linux targets. Also two separate variant configurations are written for lldb-server and generic LLVM. An appropriate variant combination can be selected by using VS Code quick open (Ctrl + Shift +P) > CMake: Select variant. + +![Setting up remote connection image 3](/linaro-website/images/blog/setting-up-remote-connection-3) + +VS Code auto detects available compiler options in the current system and makes them available to users via CMake kits. We have added custom compiler kits (cmake-kits.json) to configure clang-cl based compilation for WoA host and Arm/AArch64 Linux cross compilation mostly needed to cross-compile lldb-server executable. An appropriate compiler kit can be selected by using VS Code quick open (Ctrl + Shift +P) > CMake: Select kits. + +![Setting up remote connection image 4](/linaro-website/images/blog/setting-up-remove-connection-4) + +Once we have selected the appropriate variant and kit we can select a build target from VS Code quick open (Ctrl + Shift +P) > CMake: Set build target. In most cases our build target will be set to “all”. diff --git a/src/content/blogs/how-to-set-up-windows-on-arm-for-llvm-development.mdx b/src/content/blogs/how-to-set-up-windows-on-arm-for-llvm-development.mdx new file mode 100644 index 0000000..c9cec37 --- /dev/null +++ b/src/content/blogs/how-to-set-up-windows-on-arm-for-llvm-development.mdx @@ -0,0 +1,187 @@ +--- +title: How to set up Windows on Arm for LLVM development +description: " In this blog, Diana Picus, David Spickett, Maxim Kuvyrkov and + Omair Javaid from Linaro's Toolchain Working Group provide a step by step + guide on how to set up Windows on Arm for LLVM development. " +date: 2022-02-16T10:52:19.000Z +image: linaro-website/images/blog/llvm-image +tags: + - windows-on-arm + - open-source +author: diana-picus +related: [] + +--- + +Co-authored-by: David Spickett, Maxim Kuvyrkov and Omair Javaid + +At Linaro, we are working on developing, testing and releasing LLVM for the Windows on Arm (WoA) platform. This work is happening as part of [Linaro’s Windows on Arm project](https://linaro.atlassian.net/wiki/spaces/WOAR/overview). Together with Arm, Qualcomm and Microsoft, we are aiming to establish a healthy self-sustaining Arm open source ecosystem for Windows. This involves enabling open source tools and applications such as LLVM to run natively on Windows on Arm. + +LLVM is a compiler infrastructure known, among other things, for its highly modular structure. If you're looking for a good C/C++ compiler for WoA, you can try to use clang from one of [the official releases](https://releases.llvm.org/) on LLVM's GitHub page. However, if you're tempted to hack on it yourself, you can follow the instructions on this page to get up and running. + +The hardware we have at the moment is Microsoft Surface X Pro laptops, with Windows 10 Pro. This post describes the steps that we have taken in order to prepare our machines to compile LLVM and some of its subprojects. Some of these machines are used as part of the upstream LLVM CI (see the [Windows buildbots section here](http://llvm.validation.linaro.org/)) and also for building the official release binaries for Windows on Arm. 
Your mileage may vary on other hardware or other Windows versions, but hopefully this post will still be useful as a starting point or reference.
+
+We are going to cover installing all the tools and dependencies needed, cloning LLVM, building it from source and running the tests. But first, a few words about the environment:
+
+## Adding to the PATH
+
+For all the steps below you’ll need to put things on the PATH, if an installer doesn’t do it for you. To do that, open the start menu, type “environment” and open the link to “System Properties”. Once there, click “Environment Variables” to see a GUI for editing them.
+
+**Note**: Just like in Unix, adding to the path doesn’t refresh active terminals, and there is no way to refresh an active terminal. So you need to open a fresh terminal after changing environment variables.
+
+After following this guide your PATH should have these extra entries:
+
+* C:\Users%USERNAME%\AppData\Local\Programs\Python\Python39-32\Scripts
+
+* C:\Users%USERNAME%\AppData\Local\Programs\Python\Python39-32
+
+* C:\Program Files (x86)\Git\usr\bin
+
+  * This is for mingw utilities. Git adds itself to the System PATH
+
+* C:\Users%USERNAME%\source\ninja
+
+* C:\Program Files\LLVM\bin
+
+* C:\Program Files (x86)\CMake\bin
+
+For building release packages we also install [7-Zip](https://www.7-zip.org/download.html) and [NSIS](https://nsis.sourceforge.io/Download).
+
+* C:\Program Files (x86)\7-Zip
+* C:\Program Files (x86)\NSIS
+
+## Install Visual Studio Build Tools
+
+**Note:** Microsoft Visual Studio 2022 has been released but the installer is a 64-bit application requiring Arm64 emulation support. Arm64 emulation is generally available for Windows 11 but only a preview build is available for Windows 10.
+
+Build Tools is the command line portion of Visual Studio. You can download it from [Download Visual Studio Tools - Install Free for Windows, Mac, Linux](https://visualstudio.microsoft.com/downloads/) (“Tools for Visual Studio 2019” → “Build Tools for Visual Studio 2019”). An important part of this is the VsDevCmd.bat script, which sets up the environment so that you can use the Build Tools.
+
+The Visual Studio installer has many options, but generally, you want various things to do with C++ desktop development for **ARM64** (we do not need ARM components; in this context ARM means 32-bit Arm).
+
+If you have build issues later, come back to the installer and add anything that seems relevant. Here’s the list of what is minimally needed; check this against “Individual Components” in your “Installation Details” panel:
+
+* MSVC v142 - VS 2019 C++ x64/x86 build tools (latest)
+
+  * This is needed to make the VsDevCmd.bat script correctly set up the LIB/LIBPATH/INCLUDE variables for x86->arm64 cross-compilation. All the lib/header files are actually present, but vsdevcmd.bat doesn’t add them without the x86-hosted tools. Installing this may become optional in the future.
+
+* MSVC v142 - VS 2019 C++ ARM64 build tools (latest)
+
+  * Without this, VsDevCmd.bat doesn’t set up x86->arm64 cross-compilation.
+
+* C++ ATL for latest v142 build tools (ARM64)
+
+  * LLVM needs ATL libraries for processing debug info.
+
+* Windows 10 SDK
+
+  * Versions 18362 and 19041 are known to work.
+  * Version 20348 is known to cause an LLVM build failure:
+
+    * error: use of undeclared identifier '\_\_umulh'
+
+This is required for a correct cross-compilation setup.
+
+**Note**: There’s probably a choice in the installer for where to install to. If you do that, modify any instructions as needed.
The default will look something like “C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\Common7\Tools\”. + +## Install MSVC Redist Libraries + +These libraries have to be installed in order to get msvcp140.dll, vcruntime140.dll, concrt140.dll and other DLLs. Without them, MSVC-built applications will not run; in an LLVM build this manifests itself as llvm-tblgen.exe not being able to start. + +Currently, these libraries are not installed by default as part of Windows or Visual Studio. You will have to search the filesystem for vc\_redist.arm64.exe, and you’ll most likely find it in “C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Redist\MSVC\v142\” . + +## Install the Latest LLVM for Windows on Arm + +Go to \*[Release LLVM 13.0.0 · llvm/llvm-project](https://github.com/llvm/llvm-project/releases/tag/llvmorg-13.0.0) and download the Windows on Arm (“woa64”) installer. Run it and if it asks to add llvm to the path say yes. If it doesn’t or you forget, you can always add the install directory to PATH yourself, as described above. + +## Install CMake + +Recent versions of Visual Studio ship cmake as an x86\_64 binary (in VS circa-2020.08 cmake was an x86\_32 binary). Instead install an i386 build from [Index of /files](https://cmake.org/files/) and add that to the PATH. +A host compiler of Clang 12 or 13 is known to work with CMake 3.17. +Clang 13 is known not to work with CMake 3.21 or 3.22. + +## Install Python + +To install Python, you can go to [Python.org](https://www.python.org/) and get a 32-bit x86 build of the latest Python3. Remember to tell the installer to add python variables to the environment (so that cmake can find python3). + +## Install Git + +Go to Git - [Downloading Package](https://git-scm.com/download/win) and get the latest 32-bit x86 installer. There is likely a copy of git in the VS Build Tools install, but we recommend installing a separate copy so that you also get the tools git for Windows is packaged with. These tools are used for testing llvm: + +``` +1 llvm-lit.py: <...>\llvm-project\llvm\utils\lit\lit\llvm\config.py:46: note: using lit tools: C:\Program Files (x86)\Git\usr\bin +``` + +You can get these tools by installing MSYS2 instead, but git for Windows is based on that so the end result is the same. + +## Build Ninja + +VS Build Tools does come with a ninja but the default one doesn’t run on WoA. You should build from source ([GitHub - ninja-build/ninja: a small build system with a focus on speed ](https://github.com/ninja-build/ninja)) using the cmake build method. +([Releases · ninja-build/ninja](https://github.com/ninja-build/ninja/releases) does provide prebuilt releases but at this time the Windows variant is x86\_64 only) + +## Testing the Install + +First, open a plain terminal “Command Prompt” (ignore the cross prompts shortcuts you might find in the start menu). +Then run VsDevCmd.bat to setup the environment. + +``` +1 "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\Common7\Tools\VsDevCmd.bat" -host_arch=x86 -arch=arm64 +2 <...> +3 "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\Common7\Tools\VsDevCmd.bat" -test +4 <...> +5 set PATH=C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\DIA SDK\bin\arm64;%PATH% +``` + +If the second test command fails, your build probably won’t work. If figuring out why it failed is difficult, go ahead and do a build anyway. The compiler’s errors will probably be more informative. 
+
+**Note:** We set up the Visual Studio environment first because it adds some copies of cmake and ninja that we don’t want. By doing it first, then checking cmake and ninja, we know if our preferred versions are being used.
+
+**Note:** We have to add the arm64 DIA (debug information access) DLL onto the path otherwise it will find the x86\_32 version in BuildTools\Common7\IDE.
+
+Now check that cmake and ninja can run at all.
+
+```
+ninja --version
+cmake --version
+```
+
+If you’ve made it this far - congratulations! You are now ready to clone and build LLVM.
+
+## Check out LLVM
+
+The git for Windows install will default to converting line endings to Windows style. This applies to any file git thinks is ASCII, which includes some archive files used for llvm tests. As stated in [Getting Started with the LLVM System](https://llvm.org/docs/GettingStarted.html#checkout-llvm-from-git) — LLVM 15.0.0git documentation, use the following to override this behaviour:
+
+```
+git clone --config core.autocrlf=false https://github.com/llvm/llvm-project.git
+```
+
+## Doing a Build
+
+In the same command prompt where you have run VsDevCmd.bat as described above, set your compiler(s) to be the clang-cl.exe we installed earlier:
+
+```
+set "CC=clang-cl.exe"
+set "CXX=clang-cl.exe"
+```
+
+Then make a folder next to your llvm checkout and from that folder:
+
+```
+cmake ..\llvm-project\llvm -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;lld;llvm" -DLLVM_ENABLE_ASSERTIONS=ON -DCMAKE_C_FLAGS="-fms-compatibility-version=19.14" -DCMAKE_CXX_FLAGS="-fms-compatibility-version=19.14" -DCMAKE_TRY_COMPILE_CONFIGURATION=Release -DLLVM_DEFAULT_TARGET_TRIPLE="arm64-pc-windows-msvc" -G Ninja
+```
+
+Some specifics:
+
+* ⚙ [D92515 Bump MSVC required version to 19.14](https://reviews.llvm.org/D92515) bumped llvm’s required MSVC version, ironically meaning that clang-cl version 12 and earlier can’t build it. That’s why we need the “-fms-compatibility-version” flag to have clang-cl pretend to be a newer MSVC. You don’t need to add the -fms-compatibility-version flag for clang-cl version 13 and later.
+* A known issue with some versions of cmake is that it builds all try\_compile/try\_run as debug even if your selected build type is release. This is why we set “-DCMAKE\_TRY\_COMPILE\_CONFIGURATION=Release”. Not doing so causes a try\_run to fail to get error message strings, so lit defaults to Linux strings and many tests will fail.
+* We set LLVM\_DEFAULT\_TARGET\_TRIPLE manually because the prompt we use is an x86 32 bit host prompt. There is no arm64 to arm64 prompt, so cmake detects the host/default target triple “correctly” but it’s not what we really want.
+
+Then build as usual with ninja.
+
+Here is where you might get errors about includes and linker directories and so on. The best way to solve this is to go back to the Visual Studio Installer and add anything that looks related.
+
+You will find things online about adding certain directories to your path but be careful because you could just be adding x86 libs. Installing Arm64 specific variants is almost always what you want.
+
+If you’re doing a debug build and you see linker errors about missing libs, check if the file names end with “d” (e.g. “foo.lib” would be “food.lib” for a debug build). To solve this, search for those files and add their location to PATH. They don’t appear to be automatically added by the installers.
+
+## Running the Tests
+
+Windows Defender likes to scan new files, including those that tests create.
This changes their last accessed times and there’s one test known to fail because of this, “LLVM :: ThinLTO/X86/cache.ll”. To exclude the folder follow [Add an exclusion to Windows Security](https://support.microsoft.com/en-us/windows/add-an-exclusion-to-windows-security-811816c0-4dfd-af4a-47e4-c301afe13b26) and add `/test`. You shouldn’t need to restart Defender or the machine, it takes effect automatically. + +Running the tests should be as easy as ninja check-all (or a smaller target if you’re only interested in a specific subset of the tests). + +At the time of writing, there are some [known failures](https://linaro.atlassian.net/browse/WOA-130) when running the LLVM tests, which is why at the moment they are not run on the buildbots. Fixing those is a work in progress. If you would like to get involved, feel free to get in touch with [the Toolchain Workgroup at Linaro](https://lists.linaro.org/mailman3/lists/linaro-toolchain.lists.linaro.org/). diff --git a/src/content/blogs/how-to-use-lldb-to-debug-sve-enabled-applications.mdx b/src/content/blogs/how-to-use-lldb-to-debug-sve-enabled-applications.mdx new file mode 100644 index 0000000..73aac6b --- /dev/null +++ b/src/content/blogs/how-to-use-lldb-to-debug-sve-enabled-applications.mdx @@ -0,0 +1,143 @@ +--- +title: Using LLDB to Debug SVE Enabled Applications +description: In this blog, Linaro Engineer Omair Javaid talks us through how to + debug SVE enabled applications using LLDB. Read more here. +date: 2021-05-04T09:08:31.000Z +image: linaro-website/images/blog/tech_background_1 +tags: + - open-source + - arm + - hpc + - debugging + - linux-kernel +related_projects: + - LLVM +author: omair-javaid +related: [] + +--- + +Scalable Vector Extension (SVE) is an extension of the Arm v8-A AArch64 instruction set developed to target HPC workloads. The SVE extension introduces a new instruction set which operates on a set of vector and predicate registers. The main striking feature of SVE is its Vector Length Agnosticism (VLA) which practically means that it has 32 size-configurable vector registers called Z registers with a minimum length of 128 bits (16 bytes). The size of each of these Z registers can be increased in multiples of 128 bits upto a maximum of 2048 bits. Unlike traditional SIMD architectures which have a fixed vector register length, SVE only specifies a maximum vector length. This allows for use-case specific vector length configurations on the same hardware as well as on different architecture versions designed for target specific workloads. SVE VLA programming strives to use the same program binary to be run on any implementation of the architecture with different vector length configurations. + +SVE’s variable length vector registers have significant implications on how we implement target support in debuggers. Register access of variable sized registers requiring dynamic size update at run-time has never been supported for any targets in the past. After the introduction of SVE extension, Arm contributed debugger support in GDB debugger. Now Linaro has developed complete LLDB debugger support for SVE vector register access with dynamically changing vector lengths for different threads of the same binary. + +In the past year Linaro completed development and upstreaming of SVE support in LLDB debugger which is now available in the LLVM 12 release downloadable from [releases.llvm.org](https://releases.llvm.org/). 
This article describes how to use LLDB to debug SVE enabled applications with dynamically changing vector register size. + +Click [here](https://resources.linaro.org/en/resource/nG2VAJVkXiGRCjbDiVW3GX) for a talk on LLDB support in SVE from Linaro Connect SAN19 that provides detailed information on the SVE extension features and our initial plan for supporting SVE in LLDB. Linaro has also been involved in various other Arm architecture enablement projects in LLDB and GDB. You can get an insight on all these projects by watching [our presentation from Linaro Virtual Connect LVC21](https://www.youtube.com/watch?v=5xv5CMHiG2k). + +## QEMU virtual environment for LLDB SVE testing + +In the absence of real SVE hardware, QEMU AArch64 system mode emulation environment can be used for testing LLDB SVE support. For the purpose of this article we'll be using Ubuntu Linux 18.04 virtual machine. In order to facilitate LLDB testing using QEMU system mode emulation, we have upstreamed helper scripts under [llvm-project/lldb/scripts/lldb-test-qemu](https://github.com/llvm/llvm-project/tree/82f0e3d3ea6bf927e3397b2fb423abbc5821a30f/lldb/scripts/lldb-test-qemu). These scripts enable users to quickly set up a test environment. Click [here](https://lldb.llvm.org/use/qemu-testing.html) for detailed instructions on how to use these helper scripts for setting up an AArch64 SVE virtual machine. + +QEMU also has SVE support for Linux user-mode emulation. The Linaro blog [SVE in QEMU's linux-user mode](https://www.linaro.org/blog/sve-in-qemu-linux-user/) has details on how to utilize this feature for SVE debugging using GDB. + +## SVE compiler support + +[SVE example code](https://developer.arm.com/documentation/101726/0210/Coding-for-Scalable-Vector-Extension--SVE-/SVE-Vector-Length-Agnostic--VLA--programming/For-and-While-loop-vectorization) below adds two integer arrays using function add\_int\_arrays\_acle written using Arm C language extension (ACLE) for SVE. Compiler’s supporting SVE auto vectorization will auto generate similar SVE code without making use of ACLE intrinsics. + +LLVM clang compiler does not have auto vectorization in LLVM-12 release but can compile ACLE for SVE code. We may use any of the GCC 9.0 and onwards releases to generate SVE auto vectorization code. Click [here](https://www.google.com/url?q=https://developer.arm.com/tools-and-software/open-source-software/developer-tools/llvm-toolchain/architecture-support\&sa=D\&source=editors\&ust=1620130218065000\&usg=AOvVaw2ExP224MQGrxisgoXOr7bZ) for the release timeline of various SVE features in LLVM. 
+
+```
+#include <arm_sve.h>
+
+#define ARRAYSIZE 2048
+
+int a[ARRAYSIZE];
+int b[ARRAYSIZE];
+int out[ARRAYSIZE];
+
+void add_int_arrays_acle(int *out, int *a, int *b) {
+  uint64_t i = 0;
+  /* Number of 32-bit elements per vector for the current vector length */
+  uint64_t vl = svcntw();
+  svbool_t pred;
+  svint32_t sva, svb, svres;
+
+  pred = svwhilelt_b32(i, (uint64_t)ARRAYSIZE);
+
+  /* Each iteration loads, adds and stores one vector's worth of elements */
+  while (svptest_first(svptrue_b32(), pred)) {
+    sva = svld1(pred, &a[i]);
+    svb = svld1(pred, &b[i]);
+    svres = svadd_m(pred, sva, svb);
+    svst1(pred, &out[i], svres);
+    i += vl;
+    pred = svwhilelt_b32(i, (uint64_t)ARRAYSIZE);
+  }
+}
+
+int main() {
+  add_int_arrays_acle(out, a, b);
+  return 0;
+}
+```
+
+## LLVM Compiler options
+
+```
+clang -g -O3 -target aarch64-linux-gnu -march=armv8-a+sve
+-I//usr/aarch64-linux-gnu/include
+-I//usr/aarch64-linux-gnu/include/c++/8/aarch64-linux-gnu sve_add.c
+```
+
+## Debugging SVE add integer demo in LLDB
+
+### Step 1: Launch debug session of SVE executable and stop at breakpoint
+
+![Image of debug session of SVE executable being launched and stopping at breakpoint](/linaro-website/images/blog/image-of-debug-session-of-sve-executable-being-launched-and-stopping-at-breakpoint)
+
+### Step 2: LLDB is able to disassemble SVE specific instructions
+
+LLDB is able to disassemble instructions belonging to Arm v8.7a, including SVE specific instructions. SVE specific code can be seen in the disassembly instructions below from address 0x4005e0 to 0x4005f8.
+
+![Image of disassembly instructions for SVE specific instructions](/linaro-website/images/blog/image-of-disassembly-instructions-sve)
+
+## How to use LLDB to debug multi-threaded SVE application
+
+LLDB can debug multi-threaded Linux user applications where each thread has a different size configured for the Z, P and FFR registers. The following instructions provide a step by step guide to compile and debug a multi-threaded application containing SVE code.
+
+### Step 1: Download and compile [this sample code](https://raw.githubusercontent.com/llvm/llvm-project/43ded90094f761a4763497773e722c196c69d17e/lldb/test/API/commands/register/register/aarch64_sve_registers/rw_access_dynamic_resize/main.c) from the LLDB testsuite.
+
+![Sample code for LLDB test suite](/linaro-website/images/blog/sample-code-for-lldb-test-suite)
+
+### Step 2: Start LLDB for debugging the application compiled above
+
+We can use the Linux prctl interface with the PR\_SVE\_SET\_VL flag to configure the SVE vector length for a particular thread. The demo code in main.c configures an SVE vector length of 8 x 8 for the main thread. It also creates two child threads and configures their vector lengths to 8 x 4 and 8 x 2 respectively. Let’s start a debug session of our demo application using LLDB and examine the configured vector lengths for each thread.
+
+Note: If running QEMU to debug SVE code, the QEMU “-cpu max,sve-max-vq=8” command line option will be needed to configure the maximum vector length for the current cpu emulation.
+
+We spawn our QEMU system mode virtual environment and start lldb-server in platform mode.
+
+![Image of QEMU system mode virtual environment being spawned and LLDB server starting in platform mode](/linaro-website/images/blog/image-of-qemu-system-mode-virtual-environment-being-spawned-and-lldb-server-starting-in-platform-mode)
+
+Now that LLDB is running we will start an LLDB debug session with the executable compiled in step 1. We will set three breakpoints to stop each of the three threads after they have configured the SVE vector length and written SVE registers.
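+
+In plain text, the session shown in the screenshots below looks roughly like the sketch that follows (the executable name, target address, port and breakpoint line are placeholders, not values from this article):
+
+```
+$ lldb
+(lldb) platform select remote-linux
+(lldb) platform connect connect://<target-ip>:<port>
+(lldb) target create <executable>
+(lldb) breakpoint set --file main.c --line <line-after-PR_SVE_SET_VL>
+(lldb) process launch
+(lldb) register read vg z0 p0
+```
+
+The `vg` pseudo register reports the current vector length in granules (multiples of 64 bits), so it is a quick way to confirm what each thread has configured.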
+
+![Image of LLDB debug session with the executable compiled in step 1](/linaro-website/images/blog/image-of-lldb-debug-session-with-the-executable-compiled-in-step-1)
+
+Next we issue a run; ideally all three breakpoints will be hit and we will see execution stopped at all three breakpoints in three separate threads. In case any of the threads have not hit their breakpoint, we can issue “(lldb) thread select \<thread index>” followed by “(lldb) thread continue” to stop it at the intended breakpoint location. In the picture below all three threads have stopped at the intended breakpoint locations.
+
+![Image of all three threads having stopped at the intended breakpoint locations](/linaro-website/images/blog/image-of-all-three-threads-having-stopped-at-the-intended-breakpoint-locations)
+
+Now we can select individual threads and read SVE registers to verify that each thread has its separate vector length configured and that the SVE Z and P registers are sized according to the configured vector length.
+
+#### Select thread 1 with vector length 8
+
+![Image of thread 1 vector length 8](/linaro-website/images/blog/thread-1-vector-length-8)
+
+#### Select thread 2 with vector length 4
+
+![Image of thread 2 vector length 4](/linaro-website/images/blog/thread-2-vector-length-4)
+
+#### Select thread 3 with vector length 2
+
+![Image of thread 3 vector length 2](/linaro-website/images/blog/thread-2-vector-length-2)
+
+## Future plans
+
+### Hardware Testing
+
+SVE support in the LLDB debugger has been tested under a QEMU virtual environment. We will have access to SVE hardware in the coming weeks and any bugs found during hardware testing will be fixed in future LLVM releases.
+
+### Platform Support
+
+SVE is currently supported on the AArch64/Linux platform; in the future, support for Windows and other operating systems needs to be added. We are currently working on improving LLDB for Windows on Arm and will test/fix SVE support in the Windows host - Linux target configuration.
+
+For more information on Linaro and the work we do, reach out to us on [our contact page](https://www.linaro.org/contact/).
diff --git a/src/content/blogs/improving-audio-latency-in-android.mdx b/src/content/blogs/improving-audio-latency-in-android.mdx
new file mode 100644
index 0000000..811c23a
--- /dev/null
+++ b/src/content/blogs/improving-audio-latency-in-android.mdx
@@ -0,0 +1,39 @@
+---
+title: Improving Audio Latency in Android
+description: How do you save power on a power-constrained system while always
+  being responsive? In this blog, Paolo Valente, Biagio Ferri and Davide Zini
+  talk about the work they have done to achieve this while improving audio
+  latency in Android.
+date: 2021-12-07T01:43:00.000Z
+image: linaro-website/images/blog/code
+tags:
+  - android
+related_projects: []
+author: paolo-valente
+related: []
+
+---
+
+## Introduction
+
+It is not easy for a power-constrained system to save power and, at the same time, always be responsive. In fact, the most efficient way to save power is to keep components off as much as possible. For CPUs, this means keeping the frequency as low as possible, and idle states as deep as possible. This power-saving policy leads to high latencies if CPU workloads happen to increase suddenly (latency then gets back under control as the system performance is raised to a level compatible with the new load).
+
+With audio workloads, this issue can cause perceivable glitches and silence gaps.
We have reproduced these problems deterministically with the [SynthMark audio emulator](https://github.com/google/synthmark), on a Qualcomm 845c Dragonboard, with a 5.10 kernel and the [Android Open Source Project](https://source.android.com/docs) (AOSP) master.
+
+More importantly, we have also devised a solution to this problem. It proved remarkably effective with SynthMark workloads. Details follow.
+
+## Spotting the problem
+
+by Biagio Ferri and Davide Zini
+
+SynthMark is a benchmarking emulator that generates audio tracks and collects relevant parameters, including audio latency. We focused on a specific test: switch mode, a case where the workload switches repeatedly from low to high. This is the most stressful and unfriendly pattern for audio latency. Using the board’s default CPUfreq governor, schedutil, we got a latency of around 12ms. On the opposite end, with the performance CPUfreq governor (all CPUs at maximum frequency) and the deepest idle state disabled, latency drops to 2ms (until thermal mitigation comes into play). The high latency in default mode is evidently caused by the performance level being too low.
+
+## Adding utilClamp controller
+
+by Biagio Ferri
+
+Starting from SynthMark, after several consultations on possible hypotheses, it was decided to deal with the problem by implementing a jump-to-max and slowly-decrease policy, to choose the right frequency for handling a load increase as quickly as possible. This mechanism was implemented by modifying the utilClamp parameter, which allows a single process not to fall below a certain frequency threshold. In more detail, when an underrun of the audio buffers is detected, utilClamp is pushed immediately to its maximum possible value. Then, as a load decrease is detected, utilClamp is decreased linearly in small steps, until the CPU frequency complies with the current load.
+
+With this solution in place, latency falls from 12ms to 6ms, which essentially eliminates glitches and silence gaps. And the system remains in the default power-saving mode.
+
+For more information on the work we are doing on audio latency in Android, have a look at [our contribution for SynthMark](https://github.com/google/synthmark/commit/0e0ce58bd04808970f1a4186ce3241e9035aca74), or, more in general, at [Linaro’s Power and Performance project](https://linaro.atlassian.net/wiki/spaces/PERF/overview).
diff --git a/src/content/blogs/integrating-accelerated-video-decoding-with-v4l2-in-aosp.mdx b/src/content/blogs/integrating-accelerated-video-decoding-with-v4l2-in-aosp.mdx
new file mode 100644
index 0000000..800a2c4
--- /dev/null
+++ b/src/content/blogs/integrating-accelerated-video-decoding-with-v4l2-in-aosp.mdx
@@ -0,0 +1,63 @@
+---
+title: Integrating Accelerated Video Decoding in AOSP
+description: >
+  In this blog, Linaro Engineer John Stultz talks about the work Linaro has done
+  to integrate accelerated video decoding with v4l2 in AOSP. Read more here.
+date: 2021-12-07T08:47:03.000Z +image: linaro-website/images/blog/technology-3389917_1920 +tags: + - android +author: john-stultz +related: [] + +--- + +Upstream enabled development boards are an extremely crucial tool for our work at Linaro, as they provide a platform to develop and validate new solutions, as well as allow us to demonstrate the value of those solutions to upstream maintainers, and also as a vehicle for testing to ensure external regressions are caught and quickly fixed. However, compared to shipping devices like phones, development boards are usually missing functionality. Often this is due to missing hardware, such as touch-panels, NFC, batteries, or fingerprint readers, but in some cases it's due to missing upstream kernel support for specific hardware functionality, or missing userland HALs to enable it. For the scope of much of our work, this is acceptable, as we can still do quite a bit of core kernel development and testing with only a basic amount of functionality. But there are still benefits to be had by enabling additional features on development boards, as in doing so, we may find bugs and limitations of existing frameworks, and it allows us to collaboratively work with others to solve and upstream generic solutions to the problems we find. + +One area that has commonly been missing on AOSP development boards is open implementations of accelerated video decoding. + +On prior AOSP development boards, this has mostly been due to missing upstream kernel support for video decoding IP. However, on the [Qualcomm Dragonboard 845c](https://source.android.com/setup/build/devices#845cdragonboard)/Robotics RB3 and [RB5 devices](https://www.96boards.org/product/qualcomm-robotics-rb5/), the Qualcomm Landing Team has managed to upstream kernel support for the Venus acceleration hardware, using standard kernel v4l2 interfaces. The Qualcomm landing team is an engineering team within Linaro which works closely with Qualcomm on upstreaming their platforms. + +While this got it working for classic Linux environments, AOSP was still missing out, as Android has its own media subsystem called Codec2, and it requires a vendor supplied HAL to glue its logic to the kernel drivers for hardware support. And most vendors shipping devices usually implement their own proprietary HALs, using out of tree, non-v4l2 kernel drivers. + +A while back Google did implement a v4l2\_codec2 HAL, to use the upstream kernels v4l2 interfaces. However, this HAL seems to have evolved out of efforts from the ChromeOS team, likely for use when running Android containers on ChromeOS. This means it included the extra complexity of additional abstractions in order to work on ChromeOS, and as a result it had a number of logic and build assumptions that prevented it from working properly on plain AOSP in the past. + +After a number of false starts, Amit Pundir and myself started taking a more serious look at integrating the v4l2\_codec2 HAL earlier this year. This also aligned with similar external efforts by both Kevin Hilman and Neil Armstrong from Baylibre as well as Dmitry Shmidt on the AndroidTV team, which let us collaborate on various build-issues and potential routes for moving forward. But it wasn’t until a major refactoring effort to the HAL by Google, that was released with Android12, that we were able to make any real progress. + +Unfortunately, the newly refactored code in Android12 didn’t work out of the box. 
But we were able to make further progress than before, and working with Google developers as well as other community members, we uncovered a number of other bugs in the process.
+
+The first was a kernel issue with the v4l2 VIDIOC\_DQEVENT ioctl. Once diagnosed, I reported the problem to Arnd Bergmann who was able to quickly submit a fix, which has now [been merged](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=678d92b6126b9f55419b6a51ef0a88bce2ef2f20).
+
+Additionally, with the extra efforts working with the venus kernel driver, we uncovered a frequent boot issue, where the driver components would initialize in an unexpected order causing a crash. For this, Tadeusz Struck stepped in, diagnosed the issue and [submitted a patch upstream to fix it](https://lore.kernel.org/all/20211029214833.2615274-1-tadeusz.struk@linaro.org/).
+
+Another issue was found in the drm\_hwcomposer which was not handling multi-plane buffers properly. I submitted an initial fix, and Roman Stratiienko from the GloDroid project [merged a further improved fix for the problem](https://gitlab.freedesktop.org/drm-hwcomposer/drm-hwcomposer/-/commit/875f39793ff12f95cf8bd5c66addfa14b3cf01fb?merge_request_iid=160).
+
+Finally, with a few other hacks we managed for the first time to get video output from the v4l2\_codec2 HAL. This was exciting, but unfortunately that output looked like this:
+
+![Output from video decoding on AOSP](/linaro-website/images/blog/video-decoding-aosp-image-1)
+
+After much continued debugging (which included a hack to overwrite the venus output buffers with a hand-calculated frame to try to understand if the venus hardware was exporting the wrong format or if the display was not displaying it correctly), I realized that the gralloc allocator was setting the DRM\_FORMAT\_MOD\_QCOM\_COMPRESSED modifier flag on the dmabuf.
+
+This flag is something the display code understands, so it expects the buffers to be compressed, but the v4l2 interfaces do not look at this flag, so the venus hardware was filling buffers with uncompressed frames, which were then misinterpreted as compressed data by the display hardware.
+
+A workaround to disable setting the compression modifier on NV12 buffers was [merged in the minigbm gralloc project](https://chromium-review.googlesource.com/c/chromiumos/platform/minigbm/+/3265874). Additionally Stanimir Varbanov on the Linaro Qualcomm Landing Team has [submitted patches to introduce new format types so that v4l2 interfaces](https://lore.kernel.org/lkml/20210706124034.773503-1-stanimir.varbanov@linaro.org/) can distinguish between NV12 and QCOM\_COMPRESSED NV12, until the v4l2 interfaces can be extended to pass through the DRM buffer object modifiers.
+
+Pausing here for a moment, it’s good to appreciate how efforts like this - just trying to integrate something new - manage to uncover bugs and limitations all through the stack! And this shows the importance of further enabling new functionality on AOSP development boards, which increases test coverage, uncovering issues and allowing us to quickly catch and fix future regressions.
+
+Now, back to the v4l2\_codec2 HAL, where there are still a few remaining issues to fix:
+
+\#1: An AOSP build issue, where we need to set a few v4l2\_codec2 libraries as vendor\_available, as discovered by Dmitry Shmidt. This still needs review and feedback from the v4l2\_codec2 maintainers.
+ +\#2: The Codec2 framework wants to set up the encoded video input buffer size very early in the initialization of the infrastructure. So the v4l2\_codec2 logic sets the value, and then later queries the hardware to try to set the buffer size. Unfortunately the venus driver is a bit more picky and returns a different buffer size then what is requested. Ignoring the v4l2 API rules, the v4l2\_codec2 code does not use the kernel’s adjusted buffer size and sticks to its earlier requested size, which causes problems when importing buffers into the venus driver. Ideally the v4l2\_codec2 could change to use the kernel’s adjusted size, but at that point in the code it's too late for the Codec2 infrastructure. So some deeper rework of the v4l2\_codec2 initialization logic is needed. Alternatively, the venus driver seems to be overly picky here, so we may be able to change the driver to be less particular, which would avoid the issue. However, avoiding the issue would not work for hardware that really requires a specific input buffer size. + +\#3: With the venus driver successfully generating output frames, we’ve noticed that the output is a bit more glitchy than what we see with the software decoder. This may be due to issues in the venus kernel driver, or it may be due to incorrect cache flushing of the DMA’ed video buffers, causing accidental corruption. This will require further investigation. + +We are continuing to work with the Google developers to better understand what solutions would be acceptable, so it seems we are not too far away from having video decoding working properly with the Qualcomm Dragonboard 845c/Robotics RB3 and RB5 boards in AOSP. With similar efforts being done by others like Baylibre and the Glodroid project in the community, hopefully this will be generically working on other boards like the VIM3, RaspberryPi4 and more. + +![Video output from AOSP development board](/linaro-website/images/blog/video-output-from-aosp-development-board) + +Again, getting this far has really been a collaborative community effort thanks to a wide array of folks inside Linaro, the Qualcomm Landing Team, Google, and our community peers at Baylibre and the GloDroid project. + +Not only will this effort allow for improved testing and open the door to future work like enabling v4l2 encoding for camera video input or screen recording in AOSP, but it also provides a solid reference point, where we can work with vendors to compare the functionality of upstream kernel interfaces with the shipping vendor solutions. This allows us to better understand what changes the upstream kernel would require in order to migrate vendors to a generic upstream solution. That would allow vendors to stop having to spend effort to maintain and forward port their out of tree solutions, letting them focus on more valuable features of their IP. + +For more information on this work, go to our [Software Device Enablement for Android project page](https://linaro.atlassian.net/wiki/spaces/LCGSC/pages/15697806250/Software+Device+Enablement+for+Android). 
diff --git a/src/content/blogs/io-bandwidth-management-for-production-quality-services.mdx b/src/content/blogs/io-bandwidth-management-for-production-quality-services.mdx new file mode 100644 index 0000000..e31f24f --- /dev/null +++ b/src/content/blogs/io-bandwidth-management-for-production-quality-services.mdx @@ -0,0 +1,173 @@ +--- +title: I/O-bandwidth management for production-quality services +description: This article guides us through techniques used to guarantee I/O + bandwidth to clients, containers, virtual machines & other type of entities + accessing shared storage. +date: 2019-03-05T09:00:00.000Z +image: linaro-website/images/blog/servers-cern +tags: + - arm + - linaro-connect + - linux-kernel +author: paolo-valente +related: [] + +--- + +I/O control is the most powerful Linux solution for guaranteeing bandwidths with storage; but the most used I/O-control mechanism, throttling, can waste up to 80% of the storage speed, and fails to provide target guarantees with some common workloads (full details here [IO-control-issues](https://lwn.net/Articles/763603/)). + +So, how do providers of production-quality services currently guarantee bandwidth to clients, containers, virtual machines and so on? Do they use also alternative solutions (to throttling)? If so, do these alternatives reach higher utilizations of storage resources? + +In this article we try to answer these questions, by surveying (hopefully all) typical solutions. In particular, we will see that yes, service providers do use other solutions too, but no, alternatives definitely do not reach higher utilizations, except for when confronted with very friendly workloads. The most effective alternative, dedicated storage, may easily discard more than 90% of the available speed. + +We complete this survey by summing up the results already obtained for throttling [low-limit](https://lkml.org/lkml/2017/1/14/310) and the *BFQ* (Budget Fair Queueing) I/O scheduler [bfq-doc](https://www.kernel.org/doc/Documentation/block/bfq-iosched.txt) in [IO-control-issues](https://lwn.net/Articles/763603/). + +# Minimum, maximum and average bandwidth + +Let's start by describing how a production-quality service looks like, in terms of bandwidth guarantees. To this purpose, we will use one of the most widespread services as an example. Quick note: for brevity, we will mention only *clients*, to refer to any entity competing for storage (network in the following example), such as also containers or virtual machines. + +Unless you are reading a printed or cached copy of this article, you are using an Internet connection in this very moment. If your Internet-service contract is good, it provides you with a minimum guaranteed bandwidth. But, more importantly, if the quality of the service is truly good, then most of the time you enjoy an average bandwidth that is much higher than that minimum bandwidth. Probably you chose your service provider basing mainly on the average bandwidth it delivers. Finally, a service contract provides for a maximum bandwidth, which basically depends on how much you pay. + +These same facts hold for virtually any service where storage I/O is or may be involved: WEB hosting, video/audio streaming, cloud storage, containers, virtual machines, entertainment systems, ... + +The key feature of a good Internet service, average bandwidth, is high because of the following facts. First, only part of the total clients are active at the same time, and active clients use only a fraction of their available bandwidth on average. 
In contrast, the total bandwidth is sized so as to guarantee the above minimum per-client bandwidth, in the worst case of maximum total demand. So service providers use traffic-control mechanisms to redistribute the unused total bandwidth in such a way that each client gets a high average bandwidth. + +How is such a standard, production-quality service scheme guaranteed when storage is involved? In particular, how effectively is unused storage speed redistributed among clients? + +# A simple storage example + +To evaluate existing solutions for guaranteeing the above service scheme, we will use as a reference a very simple, yet concrete example. 16 clients, each issuing read requests, served by a system with the following characteristics: + +* a PLEXTOR PX-256M5S SSD as storage device, with an *ext4* filesystem; + +* a 2.4GHz Intel Core i7-2760QM as CPU, and 1.3 GHz DDR3 DRAM; + +* Linux 4.18 as kernel, and *BLK-MQ* (the new multi-queue block layer \[blk-mq]) as I/O stack (Ubuntu 18.04 as distribution, although this parameter should have no influence on the results); + +* no I/O policy enforced to control I/O, and *none* used as I/O scheduler (same results with *MQ-DEADLINE* or *KYBER*). + +We assume that, over time, clients can issue either random or sequential read requests. + +In such a system, the read peak rate of the SSD fluctuates between \~160MB/s and \~200MB/s with random I/O, while it is equal to \~515MB/s with sequential I/O. In addition, a single thread doing synchronous 4KB random reads reaches a throughput of \~23MB/s, while a single thread doing sequential reads reaches about 400MB/s. + +We consider only reads, as they are the simplest type of I/O for which throughput and loss-of-control problems occur. With writes, both problems get worse. Writes: + +* tend to starve reads, because of OS-level and drive-level issues; + +* reduce throughput because they are slower than reads; + +* induce occasional very high latencies in drives. + +We assume that clients have to be treated equally. So, since the device reaches a total throughput of at least 160MB/s in the worst case, and clients are 16, each client can be guaranteed at least a minimum bandwidth of 10MB/s. + +# Complete failure with no I/O policy enforced + +The throughput reached while serving these clients is reported in Figure [clients-no-control](/assets/images/content/throughputs-no-control-bw-table.png), for the following mix of client I/O: + +* one client, called *target*, doing random 4KB reads; + +* all the other clients, called *interferers*, doing sequential reads. + +![Throughputs in case of no I/O control](/linaro-website/images/blog/throughputs-no-control-bw-table)**Throughputs in case of no I/O control** + +The figure reports five plots, for decreasing total numbers of active clients (this test has been executed with the `bandwidth-latency` benchmark in the *S suite* [S-suite](https://github.com/Algodev-github/S)). The leftmost plot shows that the total throughput is close to the read peak rate of the device if all 16 clients are active. But the target gets practically zero throughput! + +The reason is that sequential I/O is favored by both the OS, mainly by dispatching very large I/O requests for the sequential readers, and by the in-drive I/O scheduler, by letting sequential I/O almost always cut in front of random I/O (because sequential I/O makes the drive reach its highest-possible speed). The problem remains serious with 7 or even just 3 sequential readers. 
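+
+For readers who want to reproduce a similar mix with a more commonly packaged tool than the S suite, a roughly equivalent fio job file might look like the sketch below (the file name, size, block sizes and run time are arbitrary choices, and this is not the S suite benchmark itself):
+
+```
+# One synchronous 4KB random reader (the target) competing with
+# 15 sequential readers (the interferers) on the same backing file.
+[global]
+filename=fio-testfile
+size=1G
+direct=1
+ioengine=sync
+time_based
+runtime=30
+
+[target]
+rw=randread
+bs=4k
+
+[interferers]
+rw=read
+bs=1M
+numjobs=15
+```
+
+On a drive like the one described above, the per-job bandwidths reported by fio should show the same effect as the figure: the sequential readers saturate the device while the random reader is almost starved.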
+
+# List of common solutions for guaranteeing a minimum bandwidth
+
+The above failure highlights that, without countermeasures, serious bandwidth problems may occur. These problems motivate the following, hopefully exhaustive, list of solutions (two of the following items have been the focus of my previous article [IO-control-issues](https://lwn.net/Articles/763603/), see the end of this section):
+
+1. Limit throughput of bandwidth hogs
+
+2. Use the proportional-share policy with the *CFQ* (Completely Fair Queueing) I/O scheduler [io-controller](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt), and reduce the weight of bandwidth hogs
+
+3. Use the throttling I/O policy with *low limits* [low-limit](https://lkml.org/lkml/2017/1/14/310)
+
+4. Use dedicated storage
+
+5. Use the proportional-share policy with the *BFQ* I/O scheduler [bfq-doc](https://www.kernel.org/doc/Documentation/block/bfq-iosched.txt)
+
+This list does not include the newly proposed *I/O latency* cgroups controller [io-lat-controller](https://lwn.net/Articles/758963/), because the latter is not aimed at guaranteeing per-client bandwidths.
+
+These solutions are meant to guarantee a minimum per-client bandwidth, and to deliver a hopefully higher average per-client bandwidth. As for the third goal, namely limiting the maximum bandwidths of clients on a per-contract basis, it can be solved naturally by adding per-client throttling on top of any of the above solutions. For brevity, we discuss the final, complete result only for the two most cost-effective solutions above: low limits and proportional share over *BFQ*.
+
+We analyze solutions separately, but some of them could be combined.
+
+The *low limits* and *bfq* solutions have already been analyzed in depth in my previous article [IO-control-issues](https://lwn.net/Articles/763603/), in terms of total throughput and of minimum bandwidth guaranteed to each client. In this article we summarize the results for these solutions, and, most importantly, we relate the success or failure of these solutions in reaching a high total throughput (which, in itself, may be of no interest to a service provider) to their success or failure in guaranteeing a high average bandwidth to each client.
+
+# Limit throughput of bandwidth hogs
+
+The Linux throttling I/O policy [io-controller](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt) allows a maximum-bandwidth limit, *max limit* for brevity, to be set for the I/O of any group of processes (a minimal example of setting such a limit is sketched below).
+
+Max limits are the most used I/O-control mechanism for addressing bandwidth issues: penalized clients, such as the target in Figure [clients-no-control](/assets/images/content/throughputs-no-control-bw-table.png), are given back their expected bandwidths, by detecting and limiting bandwidth hogs with max limits.
+
+Unfortunately, if/when bandwidth hogs actually use much less bandwidth than their max limit, the bandwidth that they leave unused cannot be reclaimed by other active groups. Thus max limits are not a good solution for delivering high average bandwidths when some clients are inactive. One may think of changing max limits dynamically, to maximize per-client average bandwidths. Indeed, this is exactly what the *low limits* mechanism does, as explained in the section on throttling with low limits below.
+
+In addition, the effectiveness of max limits is rigidly tied to the workload at hand.
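+
+As a concrete illustration of the mechanism (not taken from the measurements in this article), this is roughly how a max limit is set with the cgroup-v2 I/O controller; the device numbers, paths and PID are placeholders, and the older cgroup-v1 blkio.throttle files work in a similar way:
+
+```
+# Enable the io controller and cap the "hog" group to 10MB/s of reads
+# on the device with major:minor numbers 8:16.
+$ echo "+io" > /sys/fs/cgroup/cgroup.subtree_control
+$ mkdir /sys/fs/cgroup/hog
+$ echo "8:16 rbps=10485760" > /sys/fs/cgroup/hog/io.max
+$ echo $HOG_PID > /sys/fs/cgroup/hog/cgroup.procs
+```
+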
Moreover, if some characteristic of the workload changes, e.g., the hog moves elsewhere, the minimum-bandwidth guarantees may be lost. Finally, max limits also suffer from loss of control with writes, as shown in my previous article [IO-control-issues](https://lwn.net/Articles/763603/).
+
+On the opposite end, max limits find their natural use in limiting maximum bandwidths on a per-contract basis, as discussed in the description of a complete solution.
+
+# Proportional-share policy on *CFQ*
+
+The other I/O policy available in Linux, proportional share [io-controller](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt), expects each group to be associated with a weight, and targets per-group weighted fairness.
+
+In legacy *BLK* (the legacy, single-queue block layer), this policy is implemented by the *CFQ* I/O scheduler, which, in turn, guarantees time fairness: each group is granted access to storage for a fraction of time proportional to the weight of the group. So, to reduce the problems caused by a bandwidth hog, an administrator can reduce the weight of the hog and/or increase the weights of the suffering clients.
+
+This solution cannot provide definite minimum-bandwidth guarantees (e.g., at least 10MB/s for each client). Yet, the most serious problem is that *CFQ* fails to control bandwidths with flash-based storage, especially on drives featuring command queueing. It is therefore used mainly to mitigate bandwidth issues with rotational devices.
+
+# Throttling with low limits
+
+Because of the above throughput drawbacks of max limits, an opposite, still experimental, *low limit* mechanism has been added to the throttling policy [low-limit](https://lkml.org/lkml/2017/1/14/310). If a group is assigned such a low limit, then the throttling policy automatically, and dynamically, limits the I/O of the other groups in such a way as to guarantee the group a minimum bandwidth equal to its assigned low limit.
+
+Unfortunately, as shown in detail in [IO-control-issues](https://lwn.net/Articles/763603/), this mechanism easily throws away about 80% of the available storage speed, and fails to guarantee the limits themselves. In particular, this happens with heterogeneous workloads, i.e., with mixes of, e.g., random and sequential I/O, and/or reads and writes. In contrast, low limits are extremely effective with purely random workloads, for which they reach 100% of the storage speed.
+
+Low limits have a hard time controlling I/O, intuitively, for the same reasons why we often have a hard time getting the shower temperature right. The quantities to control, namely group bandwidths, vary non-linearly, and with variable delays, with respect to the changes of per-group max limits performed by the mechanism.
+
+In the end, low limits are, in general, not a very effective solution for getting high per-client average bandwidths. But they are extremely effective in the case of homogeneous workloads, especially if made of totally or mostly random I/O.
+
+# Dedicated storage
+
+Service providers often devote a pool of high-performance storage units only to the I/O of the clients. A distributed filesystem may glue the units together.
+
+Such dedicated storage can be made as reliable as desired in guaranteeing minimum bandwidths and providing high average bandwidths, without any I/O control.
In fact, by properly sizing the number and the speed of the storage units, the utilization of each unit can be made so low that, while there is pending I/O for a given client, little or no I/O from other clients is pending, or arrives and cuts in front; as a result, the I/O of the client is served as quickly as desired.
+
+Thus the main performance parameter for this solution is the utilization that can be reached without breaking bandwidth guarantees. To evaluate this parameter, we start by noting that an administrator typically controls the load on each unit by deciding the number of clients served by that unit.
+
+With a low number of clients, a high utilization can be reached only if all or most clients do sequential I/O. In contrast, in Figure [clients-no-control](/assets/images/content/throughputs-no-control-bw-table.png), with the random I/O of the target served alone, the device reaches less than 7% of the throughput it reaches with just two clients.
+
+# Proportional-share policy on *BFQ*
+
+In *BLK-MQ*, the proportional-share policy is implemented by the *BFQ* I/O scheduler [bfq-doc](https://www.kernel.org/doc/Documentation/block/bfq-iosched.txt). Differently from low limits, *BFQ* reliably guarantees target minimum bandwidths. As for throughput, *BFQ* reaches about 90% of the storage speed in the worst case, namely for workloads made of purely random I/O. Thus *BFQ* seems an effective solution for providing each client with a high average bandwidth.
+
+*BFQ* is, however, outperformed by low limits for purely random I/O, for which low limits reach 100% of the speed. Still, not reaching full utilization may be of little relevance in production-quality environments. For reliability, storage is typically redundant in these environments, and no single storage unit is fully utilized, so as to mitigate service degradation when some unit fails.
+
+The main problems arise with very fast storage. The above 10% loss of throughput of *BFQ* is due to a higher execution overhead than low limits. This overhead becomes a barrier to speeds above 400 KIOPS on commodity CPUs [bfq-doc](https://www.kernel.org/doc/Documentation/block/bfq-iosched.txt). Work is in progress on addressing this issue.
+
+# A complete, general solution
+
+Low limits and proportional share, enforced by *BFQ*, are evidently the most general solutions for guaranteeing minimum bandwidths. To get a complete solution, maximum bandwidths must be enforced too. This can be done by just adding max limits on top of low limits or of proportional share, with the following final result.
+
+### Minimum bandwidth:
+
+* Reliably guaranteed by low limits with homogeneous workloads, or by *BFQ* with any workload.
+
+### High average bandwidth:
+
+* Fully reached by low limits with homogeneous workloads, or quasi-optimally reached by *BFQ*, because *BFQ*
+  * keeps throughput in the range 90-100% of the available speed;
+  * systematically distributes throughput among only the active clients, according to their weights.
+
+### Maximum bandwidth:
+
+* Limited with max limits. Should limits be so low as to cause losses of throughput during light-load periods, that would actually be a consequence of the commercial strategy of the provider, and not of intrinsic problems of the mechanism.
+
+# Conclusion
+
+Current solutions for guaranteeing I/O bandwidths can be compared to drivers of buses that carry passengers belonging to different groups.
For each ride, these bus drivers are able to select passengers so as to guarantee that at least a given minimum number of persons per group get to their destinations every hour.
+
+With *homogeneous* passengers, buses can run full. But, with general mixes of passengers, buses will run almost empty, with no more than 10-20% of the seats occupied. So a lot of buses are needed to transport all the daily passengers. In addition, in some situations these bus drivers choose incorrectly, and end up not carrying enough people per hour for some unlucky group.
+
+There is now a new bus driver, *BFQ*, who can finally drive buses with 90-100% of the seats occupied, and with any mix of passengers always correctly selected. So, in general, *BFQ* enables all the daily passengers to be moved using five to ten times fewer buses than were needed previously. On the downside, *BFQ* cannot reach full seat utilization with some types of passengers, and cannot drive next-generation super-fast buses (there is development to try to improve on this front).
+
+So, the future of I/O management mostly depends on which bus drivers companies will prefer to entrust their vehicles to...
diff --git a/src/content/blogs/latest-support-for-debian-openembedded-releases-for-qualcomm-robotics-platform-rb5-now-available.mdx b/src/content/blogs/latest-support-for-debian-openembedded-releases-for-qualcomm-robotics-platform-rb5-now-available.mdx
new file mode 100644
index 0000000..d3d6ec4
--- /dev/null
+++ b/src/content/blogs/latest-support-for-debian-openembedded-releases-for-qualcomm-robotics-platform-rb5-now-available.mdx
@@ -0,0 +1,27 @@
+---
+title: "Latest support for Debian & OpenEmbedded releases for Qualcomm Robotics
+  Platform RB5 "
+description: "Check out this blog to find out what is new in this release and
+  where to find more information to get started. "
+date: 2021-09-30T11:03:39.000Z
+image: linaro-website/images/blog/code-background_1
+tags:
+  - open-source
+  - iot-embedded
+  - linux-kernel
+author: vicky-janicki
+related: []
+
+---
+
+Linaro has released the latest support for Debian and OpenEmbedded releases for the following Qualcomm Snapdragon based [96Boards devices](https://www.96boards.org/): the [Dragonboard 410c](https://www.96boards.org/product/dragonboard410c/), the [Qualcomm Robotics Platform RB3](https://www.96boards.org/product/rb3-platform/) and the [Qualcomm Robotics Platform RB5](https://www.96boards.org/product/qualcomm-robotics-rb5/).
+
+We are committed to keeping the Dragonboard platforms up to date with the latest Linux kernels and user space components for the Qualcomm boards. The Debian and OpenEmbedded releases are usually in sync, and offer different approaches to building a Linux system.
+
+The Debian releases for the Qualcomm Snapdragon based 96Boards platforms include a major system upgrade to Debian Sid, a major kernel upgrade to Linux Kernel 5.13.9 and a MESA upgrade to v20.3.5. The OpenEmbedded releases include a system upgrade to Dunfell (3.1.10 LTS), a major kernel upgrade to Linux Kernel 5.13.9 and an SDK release based on Dunfell (3.1.10). Note that a firmware/bootloader upgrade might be required for this release. You must ensure you are using the most recent firmware version. Refer to [the release notes](https://www.linaro.org/downloads/#releases_for_snapdragon) for more details.
+
+This is the first Linaro release which provides Debian support for the RB5 platform. Debian reference images with the GNOME3 desktop environment are provided.
+
+Access the above releases, documentation, and release notes from [the Linaro downloads page](https://www.linaro.org/downloads/); community support is available on [the 96Boards forum](https://discuss.96boards.org/).
+
+For more information on Linaro and the work we do, make sure to [get in touch](https://www.linaro.org/contact/)!
diff --git a/src/content/blogs/let-s-boot-the-mainline-linux-kernel-on-qualcomm-devices.mdx b/src/content/blogs/let-s-boot-the-mainline-linux-kernel-on-qualcomm-devices.mdx
new file mode 100644
index 0000000..7797275
--- /dev/null
+++ b/src/content/blogs/let-s-boot-the-mainline-linux-kernel-on-qualcomm-devices.mdx
@@ -0,0 +1,373 @@
+---
+title: Booting the Mainline Linux Kernel on Qualcomm Devices
+description: >
+  In this blog, Vinod Koul shares detailed instructions to get started with the
+  mainline Linux kernel on arm64 Qualcomm Snapdragon based devices.
+date: 2021-10-28T07:29:06.000Z
+image: linaro-website/images/blog/tech_background_1
+tags:
+  - linux-kernel
+  - arm
+author: vinod-koul
+related: []
+
+---
+
+One of the benefits of Linaro Core and Club memberships is the option to have a Landing Team. A Landing Team is a group of Linaro engineers that is dedicated to one Linaro member, and whose work contributes to both private and public projects. Beginning in 2014, the Linaro Qualcomm Landing Team has been an active contributor to upstream Qualcomm platforms, building stable releases for the 96Boards Dragonboard program and adding support for the next generation of Qualcomm mobile platforms. Fostering and partnering with the open source community is a primary goal of this Landing Team, often in the role of maintainers for Qualcomm sub-systems.
+
+In addition, [Linaro Developer Services](https://www.linaro.org/services/) has a dedicated team which provides Linux Board Support Package (BSP) development, maintenance and optimization for Qualcomm platforms such as Qualcomm Snapdragon, to companies building products based on Qualcomm processors. Check out Linaro Developer Services for additional information on how they can help.
+
+In this blog, Senior Engineer Vinod Koul from the Linaro Qualcomm Landing Team shares detailed instructions to get started with the mainline Linux kernel on arm64 Qualcomm Snapdragon based devices.
+
+## Overview
+
+The current advanced state of the mainline Linux kernel for Qualcomm Snapdragon platforms is such that it is becoming easier to run an upstream Linux kernel flavor on a Qualcomm based device, such as a Snapdragon based development board or an actual form factor device (a mobile phone, an IoT device or [a laptop](https://github.com/aarch64-laptops/build)), without significant changes or special patches.
+
+This blog post gives step-by-step instructions to download, build and boot a fully functional Linux system which can be used for kernel development on any modern Qualcomm Snapdragon based device.
+
+The blog assumes one is familiar with version control tools like git, with the steps to build the Linux kernel (for any architecture), and with installing packages on the development environment one is using.
+
+## Getting Sources
+
+There are multiple Linux kernel source trees available for one to use.
For example, [Linus Torvalds' upstream kernel tree](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/), the [linux-next tree](https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/), the [Qualcomm community upstream tree](https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git/) or [Linaro Qualcomm Landing Team’s integration tree](https://git.linaro.org/landing-teams/working/qualcomm/kernel.git/). We recommend cloning Linus' tree, as it is the upstream tree for kernel development, and adding other related trees as remotes.
+
+```
+$ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+```
+
+Optionally one can also use linux-next for integration and testing. This is a merge of most maintainer trees.
+
+```
+$ git remote add next git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
+```
+
+The Linaro Qualcomm Landing Team has various pieces which are works in progress. The tree below typically contains these pieces. Look for the integration-linux-qcomlt branch, which is a merge of various component branches using Continuous Integration (CI).
+
+```
+$ git remote add qcomlt https://git.linaro.org/landing-teams/working/qualcomm/kernel.git
+```
+
+## Building the kernel
+
+One can use GCC to build the kernel, as most Linux distributions include an aarch64 GCC cross toolchain.
+GCC can be installed on RPM-based distributions such as Fedora by:
+
+```
+$ sudo dnf install gcc-aarch64-linux-gnu
+```
+
+And on Debian based distributions by:
+
+```
+$ sudo apt install gcc-aarch64-linux-gnu
+```
+
+### Cross compiling for aarch64
+
+In order to compile for a different target architecture (aarch64) on a host machine (for example x86), we need to specify the target architecture and the cross compiler to the kernel makefile. The architecture is specified with the ARCH flag, which in this case is “arm64”, and the cross compiler with the CROSS\_COMPILE flag, which would be “aarch64-linux-gnu-”.
+
+### Steps to build the kernel
+
+To compile the kernel, we first need to set up the configuration (“config”) file. In the kernel, we have config files for different architectures. So, on specifying ARCH=”arm64”, the build system will pick the appropriate architecture config file:
+
+```
+$ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- defconfig
+```
+
+Then, the make command should be provided with arguments to compile the kernel, device tree blobs (dtbs) and modules as below:
+
+```
+$ make -j$(nproc) ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- Image.gz dtbs modules
+```
+
+Next, we create a module library by installing and stripping modules (which helps to reduce the overall size of the modules). Then we install the modules to a local directory so that we can move them to the target later. The Linux build system can do that for us, as shown below:
+
+```
+$ make -j$(nproc) ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- modules_install INSTALL_MOD_PATH=./modules_dir INSTALL_MOD_STRIP=1
+```
+
+## initramfs
+
+Now that we have built the kernel, modules and dtbs, we need to package them into a bootable image and boot the board. For this discussion we are going to use the [Qualcomm® Robotics RB3 Development Platform (based on the 96Boards Consumer specification)](https://www.96boards.org/documentation/consumer/dragonboard/dragonboard845c/) as an example.
+
+Typically in embedded programming, one would try to use a variant of [initramfs](https://www.kernel.org/doc/html/latest/filesystems/ramfs-rootfs-initramfs.html).
We can build our own image using [buildroot](https://www.google.com/url?q=https://buildroot.org/\&sa=D\&source=docs\&ust=1635411319572000\&usg=AOvVaw2LC26nkb-H4FRXZ8f77ksZ) etc., but for this example, we are going to use a reference initramfs provided and maintained by Linaro that is released as a cpio image. The **latest** images from Linaro are available here: + +* For arm64: [https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/latest/](https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/latest/) especially: https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/latest/initramfs-test-image-qemuarm64-*.rootfs.cpio.gz* +* *For 32-bit arm: [https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm/latest/](https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm/latest/), + especially: https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm/latest/initramfs-test-image-qemuarm-*.rootfs.cpio.gz + +This reference initramfs also contains tiny images if one needs to keep the overall image size smaller. + +[A Linux initramfs](https://www.kernel.org/doc/html/latest/filesystems/ramfs-rootfs-initramfs.html) is a compressed (gzip) “[cpio](https://www.linuxjournal.com/article/1213)” format archive, which is extracted into a root filesystem when the kernel boots up. After extracting, the kernel checks to see if rootfs contains a file “init”, and if so it executes it as PID 1. + +A benefit of using a compressed cpio archive is that we can concatenate several compressed cpio archives to overlay and create the final image required for boot, since all cpio archives will be decompressed and overlaid serially. Here we would like to add a module cpio archive so that initramfs finds the modules and loads them. To create the module cpio, run the following command: + +``` +$ (cd modules_dir; find . | cpio -o -H newc | gzip -9 > ../modules.cpio.gz) +``` + +And finally create the final initramfs image: + +``` +$ cat initramfs-test-image-qemuarm64-20210422073919-769.rootfs.cpio.gz modules.cpio.gz > final-initramfs.cpio.gz +``` + +## Preparing the board + +In order to boot on a Qualcomm Snapdragon based board (MTP, HDK, form factor device etc), it is recommended to perform the following steps. + +Many of the most recent boards ship with a dtbo partition and when loading a kernel and DTB, the bootloader will overlay the DTB with the content of the dtbo partition causing it to go badly. In order to avoid this situation, it is recommended to erase/program the dtbo partition. + +For some boards, erasing the dtbo partition causes the bootloader to fail. So it is recommended to program NULL to this partition. + +``` +$ dd if=/dev/zero of=zero.bin bs=4096 count=1 + +$ fastboot flash dtbo_a zero.bin +$ fastboot flash dtbo_b zero.bin + +$ fastboot reboot +``` + +## Building the boot image + +For creating boot images which can be loaded by fastboot, we use the mkbootimg tool. This can be obtained from the skales repository. This contains various tools and we use mkbootimg for creating the image. + +``` +$ git clone https://git.codelinaro.org/clo/qsdk/oss/tools/skales +``` + +mkbootimg needs to be passed kernel, dtb and final initramfs image. First, we append the dtb image to the kernel image. Please note that we should use the appropriate DTB for the board one is working on. For the below example, we are using the Qualcomm® Robotics RB3 Development Platform as noted earlier, so the DTB used is sdm845-db845c.dtb. 
+ +``` +$ cat arch/arm64/boot/Image.gz \ +arch/arm64/boot/dts/qcom/sdm845-db845c.dtb > Image.gz+dtb + +CMDLINE="ignore_loglevel earlycon” + +$ ./skales/mkbootimg --kernel Image.gz+dtb \ + --cmdline ${CMDLINE} --ramdisk final-initramfs.cpio.gz \ + --base 0x80000000 --pagesize 4096 --output boot.img +``` + +The resulting boot image can be booted on your board using fastboot. It is recommended to use slot ‘b’ for booting. Slot ‘a’ can also be used if the board supports that, but some production devices don't boot when using slot 'a', so using slot 'b' is recommended in those cases. + +This will not program the boot image into the onboard storage (eMMC/UFS), but load it and boot from it. During a successful boot, one should see the serial console printing messages about booting the kernel and see the shell prompt on the serial console at the end. + +``` +$ fastboot -s set_active b + + +$ fastboot -s boot boot.img +``` + +Below is the snippet of boot log on the RB3 board: + +``` + +Fastboot: Initializing... +Fastboot: Processing commands +Fastboot Action (Press to select): SAT +Handling Cmd: getvar:slot-count +Handling Cmd: set_active:a +SetActiveSlot: _a already active slot +Handling Cmd: download:026d9000 +Download Finished +Handling Cmd: boot +A/B retry count NOT decremented +Booting Into Mission Mode +No dtbo partition is found, Skip dtbo +Exit key detection timer +GetVmData: making ScmCall to get HypInfo +GetVmData: No Vm data present! Status = (0x3) +No Ffbm cookie found, ignore: Not Found +Memory Base Address: 0x80000000 +Decompressing kernel image start: 13555 ms +Decompressing kernel image done: 21000 ms +BootLinux: failed to get dtbo image +DTB offset is incorrect, kernel image does not have appended DTB +Cmdline: console=tty0 console=ttyMSM0,115200n8 pd_ignore_unused clk_ignore_unused root=/dev/sda1 rw rootwait earlycon androidboot.bootdevice=1d84000.ufshc androidboot.serialno=512e84bb androidboot.baseband=msm + +RAM Partitions +Add Base: 0x0000000080000000 Available Length: 0x00000000FDFA0000 +WARNING: Unsupported EFI_RAMPARTITION_PROTOCOL +ERROR: Could not get splash memory region node +kaslr-Seed is added to chosen node + +Shutting Down UEFI Boot Services: 22557 ms +BDS: LogFs sync skipped, Unsupported +App Log Flush : 0 ms +Exit BS [22716] UEFI End +[ 0.000000] Booting Linux on physical CPU 0x0000000000 [0x517f803c] +[ 0.000000] Linux version 5.15.0-rc5-00004-gd7f6a1ce1090 (vkoul@kurma) (aarch64-linux-gnu-gcc (GCC) 11.2.1 20210728 (Red Hat Cross 11.2.1-1), GNU ld version 2.35.2-1.fc34) #22 SMP PREEMPT Wed Oct 20 19:05:17 IST 2021 +[ 0.000000] Machine model: Thundercomm Dragonboard 845c +[ 0.000000] efi: UEFI not found. +[ 0.000000] earlycon: qcom_geni0 at MMIO 0x0000000000a84000 (options '115200n8') +[ 0.000000] printk: bootconsole [qcom_geni0] enabled + +... 
+ +[ 0.000000] Kernel command line: console=tty0 console=ttyMSM0,115200n8 pd_ignore_unused clk_ignore_unused root=/dev/sda1 rw rootwait earlycon androidboot.bootdevice=1d84000.ufshc androidboot.serialno=512e84bb androidboot.baseband=msm +[ 0.000000] Dentry cache hash table entries: 524288 (order: 10, 4194304 bytes, linear) +[ 0.000000] Inode-cache hash table entries: 262144 (order: 9, 2097152 bytes, linear) +[ 0.000000] mem auto-init: stack:off, heap alloc:off, heap free:off +[ 0.000000] software IO TLB: mapped [mem 0x00000000fa000000-0x00000000fe000000] (64MB) +[ 0.000000] Memory: 3655892K/4161152K available (13056K kernel code, 1982K rwdata, 5488K rodata, 3072K init, 438K bss, 472492K reserved, 32768K cma-reserved) +[ 0.000000] SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=8, Nodes=1 + +... + +[ 0.013495] printk: console [tty0] enabled +[ 0.017841] Calibrating delay loop (skipped), value calculated using timer frequency.. 38.40 BogoMIPS (lpj=76800) +[ 0.028209] pid_max: default: 32768 minimum: 301 +[ 0.032984] LSM: Security Framework initializing +[ 0.037791] Mount-cache hash table entries: 8192 (order: 4, 65536 bytes, linear) +[ 0.045277] Mountpoint-cache hash table entries: 8192 (order: 4, 65536 bytes, linear) +[ 0.056323] rcu: Hierarchical SRCU implementation. +[ 0.062648] EFI services will not be available. +[ 0.067725] smp: Bringing up secondary CPUs ... +[ 0.074501] Detected VIPT I-cache on CPU1 +[ 0.074640] GICv3: CPU1: found redistributor 100 region 0:0x0000000017a80000 +[ 0.074795] CPU1: Booted secondary processor 0x0000000100 [0x517f803c] +[ 0.076641] Detected VIPT I-cache on CPU2 +[ 0.076697] GICv3: CPU2: found redistributor 200 region 0:0x0000000017aa0000 +[ 0.076778] CPU2: Booted secondary processor 0x0000000200 [0x517f803c] +[ 0.078589] Detected VIPT I-cache on CPU3 +[ 0.078622] GICv3: CPU3: found redistributor 300 region 0:0x0000000017ac0000 +[ 0.078689] CPU3: Booted secondary processor 0x0000000300 [0x517f803c] +[ 0.081238] CPU features: detected: Spectre-v2 +[ 0.081257] Detected VIPT I-cache on CPU4 +[ 0.081291] GICv3: CPU4: found redistributor 400 region 0:0x0000000017ae0000 +[ 0.081360] CPU4: Booted secondary processor 0x0000000400 [0x516f802d] +[ 0.083681] Detected VIPT I-cache on CPU5 +[ 0.083714] GICv3: CPU5: found redistributor 500 region 0:0x0000000017b00000 +[ 0.083783] CPU5: Booted secondary processor 0x0000000500 [0x516f802d] +[ 0.086244] Detected VIPT I-cache on CPU6 +[ 0.086278] GICv3: CPU6: found redistributor 600 region 0:0x0000000017b20000 +[ 0.086346] CPU6: Booted secondary processor 0x0000000600 [0x516f802d] +[ 0.088967] Detected VIPT I-cache on CPU7 +[ 0.089003] GICv3: CPU7: found redistributor 700 region 0:0x0000000017b40000 +[ 0.089073] CPU7: Booted secondary processor 0x0000000700 [0x516f802d] +[ 0.089158] smp: Brought up 1 node, 8 CPUs + +... + + +[ OK ] Started Load/Save RF Kill Switch Status. +[ OK ] Finished Update UTMP about System Boot/Shutdown. +[ OK ] Finished Update is Completed. +[ OK ] Finished Run pending postinsts. +[ OK ] Reached target System Initialization. +[ OK ] Started Daily Cleanup of Temporary Directories. +[ OK ] Reached target Timers. +[ OK ] Listening on D-Bus System Message Bus Socket. +[ OK ] Reached target Sockets. +[ OK ] Reached target Basic System. + Starting Bluetooth service... +[ OK ] Started D-Bus System Message Bus. +[ 6.955958] NET: Registered PF_ALG protocol family +[ OK ] Started A minimalistic net�…Pv4, rdisc and DHCPv6 support. +[ 7.015787] 8021q: 802.1Q VLAN Support v1.8 +[ OK ] Reached target Network. 
+[ OK ] Started QIPCRTR Name Service. +[ OK ] Started Qualcomm PD mapper service 7[0m. +[ OK ] Started Qualcomm remotefs service 0m. +[ OK ] Started QRTR TFTP service. +[ OK ] Finished Permit User Sessions. +[ OK ] Started Getty on tty1. +[ OK ] Started Serial Getty on ttyMSM0. +[ OK ] Reached target Login Prompts. +[ OK ] Stopped User Login Management. + Starting Load Kernel Module drm... +[ OK ] Finished Load Kernel Module drm. +[ OK ] Started Bluetooth service. +[ OK ] Reached target Bluetooth. +[ OK ] Started Qualcomm PD mapper service +[ OK ] Reached target Multi-User System. + Starting Update UTMP about System Runlevel Changes... +Reference-Platform-Build-X11 3.0+linaro qemuarm64 ttyMSM0 + +qemuarm64 login: root (automatic login) + +root@qemuarm64:~# +``` + +Note that [Android Debug Bridge](https://developer.android.com/studio/command-line/adb) (adb) is *not supported* in the default initramfs image, so it won’t work here. All the debugging needs to be performed over serial. But if the board supports Ethernet or a USB Ethernet dongle is available that can be used as well. + +## Advanced Topics + +In this section, we discuss some advanced topics which may be useful when working with upstream kernels. + +### Kernel configuration + +To add or remove a component from the kernel, we can use menuconfig. It opens up the menuconfig CUI. + +``` +$ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- menuconfig +``` + +Tip use “/” to search for options and use the option number to navigate to that option. + +Once we have selected the options, we need to recompile the kernel, modules and dts. + +### Boot Parameters + +Various cmdline parameters are available which help in debugging. Here we discuss a few of them which can be very useful to debug some issues. A more exhaustive reference of the parameters can be found in kernel source [documentation](https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html). + +* keep\_bootcon: This option does not unregister the boot console at start. This is only useful for debugging when something happens in the window between unregistering the boot console and initializing the real console. +* ignore\_loglevel: Ignore loglevel setting - this will print all kernel messages to the console. Useful for debugging. We also add it as a printk module parameter, so users could change it dynamically, usually by /sys/module/printk/parameters/ignore\_loglevel. +* earlycon: Output early console device and options. When used with no options, the early console is determined by stdout-path property in device tree's chosen node. +* initcall\_debug: This option traces initcalls as they are executed. This is very useful for working out where the kernel is dying during startup. Beware! This is very verbose. + +### Adding content to the Boot Image + +The initramfs image loaded in the previous section contains minimal initramfs cpio and modules cpio. In order to perform various testing features, additional content can be added to this initramfs image. + +One interesting benefit of the cpio archive approach is that we can concatenate multiple cpio images and create the final cpio image to be loaded. + +### Bootrr + +The Linaro Qualcomm Landing Team uses bootrr to check sanity and ensure all modules are loaded, so we can create a bootrr cpio archive for this + +The Bootrr build system supports creating cpio archives : + +``` +$ (cd bootrr; make cpio.gz) +``` + +### Test Utilities + +Any test binaries and libraries can be added as well. 
This can be achieved by creating the disk layout one would like on the target and copying the binaries and libraries at appropriate locations. For example it is recommended to copy binaries to usr/bin in this utilities directory. + +In order to automount a partition (like firmware), we can add an entry to inittab which would be overlayed. The last entry was added to mount the vendor\_b partition to /mnt. + +We can also create symlinks, for example to link firmware to /lib/firmware/ + +``` +$ cat test-utils/etc/fstab + +/dev/root / auto defaults 1 1 +proc /proc proc defaults 0 0 +devpts /dev/pts devpts mode=0620,gid=5 0 0 +tmpfs /run tmpfs mode=0755,nodev,nosuid,strictatime 0 0 +tmpfs /var/volatile tmpfs defaults 0 0 + +/dev/disk/by-partlabel/vendor_b /mnt auto ro,defaults 0 0 +``` + +After adding all the required pieces, we can create the cpio archive: + +``` +$ (cd test-utils; find . | cpio -o -H newc | gzip -9 > ../test-util.cpio.gz) +``` + +And finally, create the final initramfs image which contains bootrr and test-utils which should be used for making the boot image: + +``` +$ cat initramfs-test-image-qemuarm64-20210422073919-769.rootfs.cpio.gz \ +modules.cpio.gz bootrr.cpio.gz \ +test-util.cpio.gz > final-initramfs.cpio.gz +``` diff --git a/src/content/blogs/let-s-talk-about-homomorphic-encryption.mdx b/src/content/blogs/let-s-talk-about-homomorphic-encryption.mdx new file mode 100644 index 0000000..4a4df6d --- /dev/null +++ b/src/content/blogs/let-s-talk-about-homomorphic-encryption.mdx @@ -0,0 +1,45 @@ +--- +title: Let’s talk about Homomorphic Encryption +description: In this blog post we’re going to set aside the traditional + encryption and instead have a look at something called Homomorphic Encryption. + Read more here. +date: 2020-05-22T11:16:28.000Z +image: linaro-website/images/blog/cyber-security +tags: + - security +author: joakim-bech +related: [] + +--- + +#### **Homomorphic Encryption.** + +Encryption is something that all of us are using whether we know it or not. Many of the things that you are doing on a daily basis requires some kind of encryption to protect your information from being read and used by others. Encryption by itself is a big and complex topic, covering topics like source of randomness, different encryption algorithms, different modes, how often to use (or not use) encryption keys. In this blog post we’re going to set aside the traditional encryption as we’re used to and instead have a look at something called Homomorphic Encryption (HE). The word homomorphic in Homomorphic Encryption implies that it is possible to modify data. Malleability usually is a property that we don’t want when working with encryption, which is one of the reasons why we have to add some kind of MAC (HMAC etc.) on top of our encryption to be able to protect the integrity of our ciphertext. Contrary to traditional encryption, this is actually a necessary property when working with Homomorphic Encryption. This will not be a deep dive in the algorithms and the mathematics involved, simply because me as author of this article is still learning about all this. Instead it’s meant to serve as an eye opener to a technology that might be important in a few years from now. In short there are use cases that could serve as a business opportunity and be a source for revenue for the ones willing to invest time and money in this early on. 
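+
+As a minimal sketch of what this kind of malleability means in practice (using textbook RSA, which is touched on below, purely as an illustration): with a public key (n, e), encryption is E(m) = m^e mod n, and multiplying two ciphertexts gives
+
+```
+E(m1) * E(m2) = (m1^e * m2^e) mod n = (m1 * m2)^e mod n = E(m1 * m2)
+```
+
+so someone holding only the ciphertexts can compute an encryption of the product of the plaintexts without ever seeing them.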
+ +#### **Technical background.** + +Homomorphic Encryption isn’t a new thing, people have been working with this since the late 70’s when researchers were working on RSA (which in its basic form is a multiplicative Homomorphic Encryption scheme, also called Partial Homomorphic Encryption). They posed the question asking what could be achieved with a fully Homomorphic Encryption scheme. Their conclusion was that it should be possible to make computations on arbitrary encrypted data without knowledge or access to the decryption key. Still more than 30 years later we haven’t been able to put it into practical use. But, in the recent decade, researchers have implemented software (see [HElib](https://github.com/shaih/HElib) and [Microsoft SEAL](https://www.microsoft.com/en-us/research/project/microsoft-seal/) for example) and at the same time computational performance in general have seen great improvements and as a result of that researchers believe that we are at the inflection point now, i.e. it looks like it could be used in practice in a not too distant future. + +#### **What problems will Homomorphic Encryption solve for us?** + +In short, Homomorphic Encryption lets you make computations directly on encrypted data without the need to have access or any knowledge about the key used to decrypt the data. So instead of doing computations on plaintext you can do some computations directly on the ciphertext without first having to decrypt it. The piece of ciphertexts that you make computations on will also result in an encrypted blob and only the one possessing the decryption key will be able to decrypt the message. So it’s obvious that the one doing the computations for us cannot learn anything about the unencrypted data. This sounds interesting, but what are the actual use cases? + +Today it’s not uncommon to leverage computing power in big data centers. I.e., you send your data and jobs up to some cloud service, which in turn does the heavy lifting in terms of using lots of computational resources on your data and once complete, you can download and inspect the results. This scenario is fine if you’re using data where you don't have to consider privacy. If you are worried about a man in the middle watching you traffic, then you typically transfer information using an encrypted channel using SSL for example. If you also are worried about having data stored in plaintext at the cloud service provider, then you always pre-encrypt the data before uploading it to the cloud service provider. However, in that case you cannot leverage the cloud service computing power without first decrypting the data on your own (directly at the cloud service providers servers) or by letting the cloud service company have access to your key. In both cases your data will at some point be in the clear which might be a problem or at least a privacy concern. If you have requirements of never exposing data in the clear on remote servers, then at best, the cloud service provider works like an encrypted backup server. + +With Homomorphic Encryption, you would still encrypt your data locally, upload it to the cloud service provider, but the big difference now is that the service provider can run jobs and do computations directly on the encrypted data on your behalf. The computed result would still be encrypted and can be downloaded locally and then decrypted. The nice thing here is that it is possible to operate on subsets of the data. 
Compare this to the “encrypted backup” case, where you basically would have had to download everything locally, do the computations on a subset, encrypt everything again and upload it to the cloud service. If nothing else, a big waste of bandwidth and time spent on doing that. + +The computational offloading in itself is one positive thing, but the preservation of privacy is probably the most valuable thing. Since it enables companies, researchers etc. to work with data without knowing about details in the data they are working with. As a very hypothetical example, let’s say that someone wants to find out if there is any correlation between cancer and kidney problems. For such a study to take place today, someone (a person permitted to work with medical records) would first need to scrub the personal information from the data set used in the study before handing it over to the one making the study. That might be sufficient, but with Homomorphic Encryption you could basically write a function that looks up all medical records for cancer patients, compute the correlation between cancer and kidney issues and give us the result. Since it’s Homomorphic Encryption the result would still be encrypted. The server who did the computations for you has no clue what you’ve done and what your result was, but it did all the heavy lifting for you. Promoters of Homomorphic Encryption often mentions secure search queries as one use case. You could for example use something like a Google maps service to search for nearby restaurants and the service would be able to provide a result without knowing what you searched for, who you are and where you are. A third example is electronic voting schemes. Imagine that the voters actual votes only are stored in encrypted form. Then Homomorphic Encryption could add up all voters that have voted for candidate A, candidate B and so on and only present the result without revealing individual votes. As you can imagine there are many use cases and twists like this. + +#### **Existing software.** + +There are different variants of Homomorphic Encryption called Fully-, Somewhat- and Partially-Homomorphic Encryption. Without going into detail, they differ in the amount of operations they can do and the kind of operations they are able to use in their computations. Out of these three, the Fully Homomorphic Encryption (FHE) is the most capable of them all and that is the one that researchers are trying to realize in terms of both software as well as hardware (FHE accelerators). Why? With FHE you can do both (modular) multiplication as well as (modular) addition. If you think about how our computers are built, they are basically built with (boolean) logical gates, i.e. something that can be constructed with only multiplications and additions. So in theory you can create any type program using FHE. + +Earlier in this article we mentioned [HElib](https://github.com/shaih/HElib) and [Microsoft SEAL](https://www.microsoft.com/en-us/research/project/microsoft-seal/). HELib implements FHE using the Brakerski-Gentry-Vaikuntanathan ([BGV](https://eprint.iacr.org/2011/277)) scheme and Microsoft SEAL implement FHE using Brakerski/Fan-Vercauteren ([BFV](https://eprint.iacr.org/2012/144)), both which are about using exact arithmetic on vectors of numbers, i.e. there will be no approximations or errors in the result. There are other schemas out there also, but with different properties and characteristics. Both HElib and SEAL compile and run nicely on a regular desktop computer. I.e. 
it's not hard to try using this technology on your own. There are other software libraries out there, but these are the ones that we have started to familiarize ourselves with.
+
+#### **Issues with the state of the art implementations.**
+
+Functionality-wise the current implementations work, but the performance is really poor. Using Homomorphic Encryption is many orders of magnitude slower than working with the traditional encryption we are used to. Therefore, it is still impractical to use in reality. Today's state-of-the-art implementations work with linear functions and polynomials, and with higher-degree polynomials you can even fit nonlinear functions, but that comes with an additional cost in terms of performance. Another problem is that it's not easy (or even possible?) to implement rudimentary functions. For example, branching on encrypted data isn't possible in current implementations, even though in theory it should be doable, following the logic-gate reasoning above. As a consequence, researchers are instead looking for ways to implement specific functions that support specific use cases. Alongside that, other tricks and optimizations are being developed to reduce the number of operations involved. A simple example is that it's better to compute (Y×Y)² than Y×Y×Y×Y if you want to calculate Y⁴, i.e. two multiplications instead of three.
+
+Linaro has been working with major SoC vendors for many years and has a great track record of driving joint upstreaming efforts in many well-known open source projects that touch security in one way or another, including the Linux kernel, UEFI, U-Boot, OP-TEE, TrustedFirmware-A/M and Zephyr, to mention a few.
+
+Perhaps a collaborative approach could be the way forward for the future?
diff --git a/src/content/blogs/linaro-a-decade-of-development.mdx b/src/content/blogs/linaro-a-decade-of-development.mdx
new file mode 100644
index 0000000..b1b6246
--- /dev/null
+++ b/src/content/blogs/linaro-a-decade-of-development.mdx
@@ -0,0 +1,64 @@
+---
+title: A Decade of Achievement
+description: " In this article, former Linaro CTO David Rusling takes a look at
+  how Linaro came to be and how the company has moved forward throughout the
+  last 10 years. Read more here."
+date: 2020-06-18T02:21:00.000Z
+image: linaro-website/images/blog/10-year-graphic-horizonal1
+tags:
+  - arm
+  - datacenter
+  - linux-kernel
+  - android
+author: david-rusling
+related: []
+
+---
+
+### **Introduction**
+
+It is hard to believe that Linaro is 10 years old this year, but it is, and, like everything in life, it has evolved during this time. One thing has not changed, though: Linaro remains a place where the Arm ecosystem collaborates.
+
+### **ARM and Open Source**
+
+Arm’s business model was a key factor in their success. Unusually for the time, they licensed their technology to system on chip (SoC) makers who then built products. In essence, they acted as the R\&D department for their partners. These partners all gained because Arm built an ecosystem of tools and software around their architecture, an architecture which steadily gained capabilities.
+
+Another factor in Arm’s success is open source, and this is where my own history becomes interwoven with Arm’s story. I worked for Digital, firstly on Linux on the Alpha processor and then on Linux on the StrongArm. In 1998, as Digital Semiconductor was acquired by Intel, it was logical, perhaps inevitable, that I moved to Arm. I brought my open source experience with me.
I had seen the power of open source and saw its importance to the Arm architecture and ecosystem. Arm had much early success in the mobile space with Symbian OS (remember Nokia phones?) then two things happened that changed the world. Firstly, Apple invented the iPhone. Whilst it was ridiculed when it was first launched for everything that it didn’t have, the iPhone seriously changed what people expected of a mobile phone and how they expected to interact with it. Secondly, Android happened. For many this may have seemed derivative, but Android has a very long history as it is based on Linux, which had been around since the early 90s. + +Whilst Apple continued to develop the iPhone (and look where that has led), Android was enthusiastically taken up by the Arm ecosystem. One of the beauties of open source is that it is available to anyone, or company, that wants to use it. + +### **Mobile** + +When many companies compete to bring products to market, fragmentation will be a problem. That problem got worse, much worse, as Android became more popular. The rewards for releasing the latest technology were so great that this was, in retrospect, inevitable. It is also fair to say that, at the time, the ARM ecosystem companies were not particularly adept open source citizens. + +There were two problems facing the ARM ecosystem in 2010 - fragmentation in the Linux kernel and support for the Arm architecture in the GNU toolchain. These were the first problems that Linaro and its members focused on. Getting everyone’s Linux kernel engineers together and collaborating quickly started to solve the fragmentation issues. However, it became clear that more was needed, especially after Linus Torvalds remarked that more was needed. “Somebody needs to get a grip in the Arm community.” That remark sent shockwaves through the ARM ecosystem and helped galvanise Linaro’s members to form the Arm sub-architecture maintenance team in 2011. We were successful, as around a year later, Linus was lauding the Arm kernel community for being exceptionally well organised. I think that this marks the moment in time when the ARM community became full members of the open source community. This trend continued as we worked on power management within the kernel. + +### **The Datacentre** + +As the ARMv8 architecture was being released, the ARM Ecosystem asked Linaro to support their efforts to support ARM in the datacentre. As a way of focussing on this market segment, we formed the Linaro Enterprise Group, or LEG, in late 2012. This group was later renamed to Linaro Data Centre Group, LDCG. Other groups focussing on networking and embedded were later created. The main challenges were again fragmentation, especially in boot architecture and ensuring that a myriad of open source software needed in the data centre were available and performant on the Arm architecture. + +The data centres operate in a completely different way to the mobile phone market. It is driven by standards, from how the system boots (UEFI) to how software is reliably deployed at scale. As an example of how different, data centres distributions rely on hardware support being upstream before they will support that hardware. It is also worth noting that during the lifetime of this segment group, how software is deployed at scale was revolutionised by the adoption of containers and open source deployment frameworks such as OpenStack (which Linaro and its members helped ensure that OpenStack ran well on Arm based systems). 
LDCG started looking at High Performance Computing (HPC) in 2016. This was a natural extension of the work that LDCG had been doing, and Linaro and its members looked at standardisation, interoperability and orchestration, all driven by use cases important to our members. The engineering focus was on OpenHPC, compiler performance, SVE enablement and hardware deployment.
+
+The Linaro HPC work has been incorporated into Fujitsu’s A64FX processor release (see [this announcement](https://www-techradar-com.cdn.ampproject.org/c/s/www.techradar.com/amp/news/little-known-japanese-cpu-threatens-to-make-nvidia-intel-and-amd-obsolete-in-hpc-market) about deploying the A64FX at Nagoya University).
+
+### **Networking**
+
+The nature of collaboration within Linaro has changed and a good illustration of that is Time Sensitive Networking (TSN). TSN is a set of standards guaranteeing the delivery of network data with time-sensitive restrictions. One example is audio; after all, you don’t want your favourite song stuttering during playback. More seriously, you really need that important warning to get displayed on your car’s console.
+
+Linaro’s edge networking group, LEDGE, identified a need to coalesce around a practical Linux kernel framework that supported all of their SoCs. After a lot of discussion, including persuading one of our members to change their implementation, they settled on the switchdev architecture. A lot of architectural discussion in Linaro revolves around hardware acceleration, a feature of the diverse approaches of the ARM ecosystem. TSN provides a good example of this, in that a key concept of switchdev is that there are three priority classes, each represented by a different port. This enables the use of hardware resource allocation via queues. Even better, it standardises the access to network acceleration hardware. Traffic shapers are configured in the same manner as on a traditional hardware switch.
+
+We also pushed the configuration of shapers through integration in the TC framework (iproute2 userland commands). If you have a real switch, this does not change the configuration method and you can even determine how traffic is effectively switched between ports.
+
+TSN also illustrates the power of Linaro members having a common approach to technical problems, as no single member could have influenced any given system architecture, but, together, they could.
+
+### **Big Data**
+
+We are living in the era of big data as we wire the planet and seek to orchestrate everything from homes, transportation, and factories to whole cities. This planet-scale deployment is turning the old model of SoC vendors supplying commodity chips upside down as it becomes more and more complex to integrate devices securely with the myriad of cloud ecosystems that now exist. In many ways, the big data companies (Google, Facebook, Microsoft etc.) driving top-to-bottom software stacks and standards is reminiscent of the early history of computing, with giants such as IBM and Digital supplying hardware and software. The difference now, though, is that open source software is the common substrate upon which this is all built.
+
+One of the values of the Arm licensing model is that many SoC manufacturers compete, bringing rapid innovation to a market. However, dealing with many vendors can be difficult and this Darwinian value of competition can sometimes be lost in the noise. Google, a Linaro member since 2014, uses its Linaro membership to help manage and leverage its relationship with its Arm ecosystem.
Within the Linaro Consumer Group (LCG) Google has worked with the other members on technical work, such as the porting OPTEE (Open source Trusted Execution Environment) to AOSP for use by the Android TV project. Other collaboration includes supporting the Android ecosystem. It leveraged LCG as it lengthened its Long Term Support (LTS) kernel maintenance period from 2 to 6 years, something key to Android support. It also extended its Android Common kernel testing significantly, both in the number of tests and the number of hardware platforms that regularly run those tests (165M+ tests-to-date on real AOSP dev boards). That testing is in addition to other engineering work the segment group has been doing via the Android Open Source Project (AOSP). + +### **Summary** + +Linaro and the Arm ecosystem have both evolved, but collaboration continues to be our core activity. Our members continue to show us great trust as we work with them to get the most out of the Arm ecosystem and the markets that they participate in. The last 10 years were interesting and I’m pretty confident that the next 10 years will be too. diff --git a/src/content/blogs/linaro-and-the-linux-kernel.mdx b/src/content/blogs/linaro-and-the-linux-kernel.mdx new file mode 100644 index 0000000..e667a04 --- /dev/null +++ b/src/content/blogs/linaro-and-the-linux-kernel.mdx @@ -0,0 +1,121 @@ +--- +title: Linaro and the Linux Kernel +description: In this article, Daniel Lezcano reviews the work Linaro continues + to undertake & the contributions the company has made, together with the wider + OS community. +date: 2020-11-09T02:34:26.000Z +image: linaro-website/images/blog/open_source_keyboard_under_2mbjpg +tags: + - linux-kernel +author: daniel-lezcano +related: [] + +--- + +## Introduction + +A contribution to the Linux kernel requires experience. The development happens in a large meritocracy community, with a development process based on technical public discussions. There is no deadline, no profit, only one thing matters. Make Linux better. + +The industry has a different goal, which is to reduce the time to market as much as possible. That usually implies some short term development at the cost of misdesigns, duplicated efforts and individual changes without taking care of the existing. In addition, the turnover on the projects prevents any form of capitalisation of the knowledge for a long term support. + +These opposite goals lead to a fragmentation of the Linux code. In the long term, the gap between the Linux kernel and the product code is too large. The development time increases as well as the product validation with the consequence of a longer time to market. + +The solution is to upstream the product code into the Linux kernel. However, the lack of experience in open source, the different mindsets and cultures, can lead the upstreaming process to fail. + +In front of these apparent ecosystems incompatibility, Linaro takes place and creates a bridge between the open source and the industry, by guiding them to upstream the code in a long term vision on the Arm architecture. + +## The Linux Kernel + +## Some numbers + +In 2019 the Linux kernel had 27.8 million lines of code, dispatched in 66492 files. That is the result of 75,000 commits per year, 18,750 commits per kernel release. 15,600 persons have contributed from 1,600 companies since 2005. The Linux kernel is worth 14.4 billion dollars. 
+ +[Linaro was the #1 contributor to the Linux kernel for the v4.9 release.](https://www.linaro.org/blog/linaro-1-contributor-linux-kernel-4-9-release/) + +## Why upstream? + +Open source projects are usually covered by a GPL license. Anyone can make a copy of the project, carry out modifications and redistribute it. However, this is allowed only if the changes are published with the source code and the scripts to generate the binaries. This gives full control over the cloned project and avoids the review process of the corresponding community. This attractive solution has one major drawback among others: from the open source community’s perspective, those changes don’t exist and the original project will evolve independently. As we saw, there is a large number of changes per year for the Linux kernel, so both projects will diverge very quickly. + +In the next versions of the original project, more features will be available, so updating the cloned project to merge the new features becomes inevitable. Unfortunately, at this point, there is no guarantee that the specific changes in the cloned project will be compatible with the new version of the original project: merge conflicts, redesign, subtle bugs and revalidation of the current code are all likely. These are some of the costs of keeping the changes outside of the mainstream. + +Version after version, the cost of porting the product specific code onto new project releases grows. It keeps increasing until it becomes prohibitive, blocking future features coming from the mainstream, like a branch breaking under a heavy load. + +The only sane way to work in the long term is to upstream the code into the original project by joining, and being part of, the open source developer community. + +## The community + +The community is a group of people contributing to the same project by proposing changes. These are reviewed by the community members and accepted if they make sense and nobody objects, or when there is agreement after a technical debate. All discussions are grounded in technical facts, and the proposed changes must be beneficial to the community. The maintainers are special members: they hold the knowledge and history of the project, as well as the last word and the responsibility for committing the changes. + +## The mindset + +The focus of the community is on making the project better. For this reason any member can comment on a change from anyone. Because an open source project is the result of evolution, technical debate is always beneficial, even if the discussion can sometimes be harsh. Joining the community implies accepting criticism and the rules of the open source development process. One of these rules is to act with respect and altruism towards the community, for example by helping with components outside the scope of what a company is paying its engineers to work on. The community will then perceive the contributor positively: a symbiotic relationship rather than a parasitic one. + +That can be really hard for employees who are asked to work upstream in an open source project if their management misunderstands its philosophy. The employees will be torn between deadlines and technical achievement. + +# The Industry + +## The turnover on the projects + +Whatever the project, management often treats engineers as interchangeable resources, moving them between projects depending on the load.
This implies a delay before an engineer becomes fully operational, because of the learning curve ramp up. Another reason for changing teams often is to prevent any single engineer from becoming indispensable. Whatever we think of this managerial model, it is one of the reasons why some companies cannot accumulate enough knowledge and credibility in the open source ecosystem. + +## Capitalisation of the knowledge + +Some companies understood the importance of the open source development process and created entities within their companies to handle upstreaming support for their platforms. These entities operate autonomously to prevent conflicts between non-open source management and open source developers. This model allowed the creation of dedicated teams for the development of open source projects, where the employees could become key players inside the community, building up knowledge and experience. The open source community is based on meritocracy, so keeping some employees full time on a project saves the time otherwise needed to gain credibility and trust. + +However, such an approach is not an obvious one for a company, especially given misunderstandings of the open source development process, pressure from the hierarchy for lean operation and short term releases, and the cost of putting an open source center in place. Another aspect is how management perceives employees who operate autonomously in the open source ecosystem. + +## Linaro + +The Arm architecture is massively deployed all around the world. The licensing model allows different companies to implement their own version of an SoC. As a result, the kernel has to make use of features that differ from SoC to SoC, even for the same architecture. That was particularly true for Armv7. The different SoC vendors clone the Android kernel, which is itself a clone of the Linux kernel, and each implements the same features on its own side. The resulting code fragmentation is considerable. Linaro took the lead as a consortium to consolidate the Linux kernel, along with other open projects, over a decade ago. + +## Acting as a bridge between two ecosystems + +Where an SoC vendor wants to accelerate upstream support for a platform, it can rely on experienced open source developers working in a dedicated landing team. The Linaro engineers act under an NDA and upstream the platform specific bits. In that respect, there is little difference from the specialized companies offering the same service. + +But Linaro is much more than that. Linaro’s core engineering is in charge of understanding the needs of the different members and finding a common, generic solution usable by all of them, while leaving enough space for their own differentiation. The exercise is difficult but, after more than 10 years, the list of achievements shows the setup is working. + +In addition, the SoC vendors assign some engineers from their own teams to the different Linaro projects. These engineers are guided through the open source development process, gain experience and share it with their company. + +Linaro consolidates the fragmented Arm architecture code and helps companies understand how open source works. + +## Capitalisation of the knowledge + +In order to bring together the most experienced open source developers, Linaro chose a distributed, remote working environment, building virtual teams across the world. These developers are the backbone of the company and provide valuable insight into current and future technologies.
Dedicated to open source, they are part of its community and are decisive actors in technical decisions. Most of the discussions happen on the mailing lists, but open source events like the Linux Plumbers Conference or the Embedded Linux Conference are the preferred places to meet and exchange ideas. + +Given these developers’ involvement in the open source ecosystem, they have the influence to steer discussions and propose technical solutions. + +## Consolidation of the kernel features + +One of Linaro’s major goals is to reduce the code fragmentation between the different SoC vendors’ specific kernels and the mainstream. The features added in the custom kernels are often similar, and the role of the Linaro engineers is to identify the common pieces, improve them where possible, and propose a generic solution to be merged into the mainstream. The impact on the custom solutions is immediate, because the mainstream must ensure backward compatibility. + +## Linaro Challenges + +Acting as a bridge between the members and the open source ecosystem, Linaro has the responsibility of connecting both worlds. Because Linaro is involved in the open source projects, one side of the connection is easier than the other. On the other side, Linaro tries to move custom solutions towards generic solutions. This puts into question work already done by member engineers, who may never have had their code or their design audited through a review process. Member engineers could perceive that as their work being devalued, or as a hostile takeover of the code, and misunderstanding of the open source ecosystem can exacerbate this feeling. + +Another aspect is the nature of Linaro itself, which brings different members together to collaborate on the common parts in order to prevent duplicated work and code fragmentation. Linaro has the responsibility of collecting information to understand the technical needs of the different members and identify the common parts. This is a difficult task, as the members are competitors in the market. + +## Linaro achievements + +Linaro holds a number of maintainerships in the Linux kernel. The following table lists the Linux kernel areas maintained by Linaro for the Arm architecture, the drivers and the generic frameworks. The frameworks are bigger both in size and in change submission traffic. This table demonstrates how involved Linaro is in Linux kernel development and how, de facto, it has a voice in the community to make the upstreaming process as smooth as possible. Since its creation, Linaro has merged more than 23,000 changes in the kernel, especially in the generic frameworks, where implementation and the submission process are harder than for a driver. + +![maintainer framework and driver table](/linaro-website/images/blog/linux-kernel-blog-1) + +The next table shows notable Linaro achievements in terms of Linux kernel functionality. The list is not exhaustive, but it shows that Linaro is a major player in the Linux kernel ecosystem. + +![Linaro noticeable achievements table by year](/linaro-website/images/blog/linaro-noticeable-achievements-1) + +\[1] [Force Idle When a CPU Is Overheating](https://www.linaro.org/blog/force-idle-when-a-cpu-is-overheating/) + +\[2] [Thermal Notifications With Netlink](https://www.linaro.org/blog/thermal-notifications-with-netlink/) + +## Conclusion + +Linaro is a consortium dedicated to supporting the Arm architecture in the open source ecosystem.
It is deeply involved in Linux kernel development as well as in other open source projects. Its role is to educate the members and make them comfortable with the open source development process. + +The Arm architecture, especially in mobile and embedded systems, is constantly evolving, with ever more complexity and technical challenges to solve. Over the past decade Linaro has achieved a great deal, especially on the Linux kernel side, and has steadily reduced the gap between the custom and mainstream kernels. This must remain a continuous effort: under market pressure, the temptation to fall back to out-of-mainline kernels is high, and with it comes the risk of returning to code fragmentation. + +The Arm architecture has practically replaced all other architectures in embedded systems since Linaro started, which would not have been possible without creating and maintaining subsystems and platforms over the long term. + +## About The Author + +Daniel is a member within the Kernel Working Group (KWG). The group's primary focus is to be an active contributor to the upstream community and facilitate acceptance of our code into the Linux mainline kernel. Our goal is kernel consolidation - a single source tree with integrated support for multiple Arm SoCs and Arm-based platforms. diff --git a/src/content/blogs/linaro-brings-testing-and-automation-to-fosdem-2020.mdx b/src/content/blogs/linaro-brings-testing-and-automation-to-fosdem-2020.mdx new file mode 100644 index 0000000..d8cdc13 --- /dev/null +++ b/src/content/blogs/linaro-brings-testing-and-automation-to-fosdem-2020.mdx @@ -0,0 +1,30 @@ +--- +title: Linaro brings testing and automation to FOSDEM 2020 +description: In this blog we talk about Linaro's testing efforts that were + presented at FOSDEM 2020. +date: 2020-03-12T09:16:32.000Z +image: linaro-website/images/blog/48806077322_d5b5e6aaa0_k +tags: + - testing + - linux-kernel +author: anders-roxell +related: [] + +--- + +Anders Roxell is part of the [Linux Kernel Functional Testing (LKFT) project](https://lkft.linaro.org/) at Linaro. The mission of LKFT is to perform functional regression testing on select Linux kernel branches in real time and report any regressions as quickly as possible. In this blog, Anders talks about the devroom he recently co-organised at FOSDEM to get all the testing folks that attend the event in the same room. + +There are many different projects at the moment independently running their own versions of test suites. So far there has been little coordination on what tests are needed and/or a common definition of test plans. As a result, there is a lot of fragmentation in this space. If we can increase collaboration in the testing landscape, and find synergies between the different projects, we may be able to prevent more bugs from entering released kernels. + +This is why I decided to co-organise a devroom at FOSDEM, to help further drive collaboration and reduce fragmentation. While I didn’t get as much discussion as I perhaps had hoped, there was [a good variety of talks at the Testing and Automation devroom](https://fosdem.org/2020/schedule/track/testing_and_automation/) - covering topics such as kernel testing, writing tests in Go, testing large software, improving the culture of automated testing in FOSS and auto-healing clusters through negative testing.
+ +Here are some of the highlights: + +* [One test output format to unite them all](https://fosdem.org/2020/schedule/event/testing_one_test_output_format/) by Boris Feld was a fun talk that highlighted output format (although he is trying to solve the input format as well) +* [Testing a large testing software](https://archive.fosdem.org/2020/schedule/event/testing_large_testing_software/) by Linaro Engineer Rémi Duraffort was a great talk on how to mock up your software so you're able to test it. Remi talked about Linaro's Automated Validation Architecture (LAVA) [](https://www.lavasoftware.org/)and how it is becoming the de facto standard to test hardware. +* [How to fail successfully and reliably](https://fosdem.org/2020/schedule/event/testing_fail_successfully_reliably/) by Saleem Siddiqui was an interesting high level talk. Key takeaway was “to fail fast” and “fail without regrets”. That and “bikeshedding”. +* [Writing Go(od) Tests](https://archive.fosdem.org/2020/schedule/event/testing_writing_go_tests/) by Nikki Attea was a great talk about test driven development. + +For FOSDEM 2021, I’d like to see more devrooms for testing like “Automated firmware and kernel testing”. “Automated testing” can be the devroom that covers broader talks about how to harmonise test output formats. + +In addition to the devrooms, Linaro will look at doing a testing summit a few days before or after FOSDEM 2021 where we can drive more discussion to help find synergies between all the different projects. If you’re interested in getting involved, please reach out! diff --git a/src/content/blogs/linaro-connect-budapest-2020-cancelled.mdx b/src/content/blogs/linaro-connect-budapest-2020-cancelled.mdx new file mode 100644 index 0000000..8dda71e --- /dev/null +++ b/src/content/blogs/linaro-connect-budapest-2020-cancelled.mdx @@ -0,0 +1,22 @@ +--- +title: Linaro Connect Budapest 2020 cancelled +date: 2020-02-20T08:53:19.000Z +image: linaro-website/images/blog/48784720458_63040ac998_k +tags: + - linaro-connect +author: connect +related: [] + +--- + +Over the last few weeks, Linaro has been carefully monitoring the Coronavirus situation. We have a duty of care for all attendees at our events. Health and safety is always our top priority. + +With that in mind, it is with great regret that we have decided to cancel the upcoming Linaro Connect which was due to be held on 23-27 March 2020 at the Corinthia Hotel in Budapest, Hungary. + +If you have any questions or require further assistance please contact connect@linaro.org and we will work diligently to get back to you as quickly as possible. + +We are working on finalizing the details for the next Linaro Connect and will communicate them in the near future. + +We extend our sympathies to all those affected by the virus in China and around the world. + +The Linaro Connect Planning Team diff --git a/src/content/blogs/linaro-contributes-to-the-openstack-community-ci-officially-supporting-openeuler.mdx b/src/content/blogs/linaro-contributes-to-the-openstack-community-ci-officially-supporting-openeuler.mdx new file mode 100644 index 0000000..fc38174 --- /dev/null +++ b/src/content/blogs/linaro-contributes-to-the-openstack-community-ci-officially-supporting-openeuler.mdx @@ -0,0 +1,62 @@ +--- +title: Linaro contributes to the OpenStack community CI officially supporting + openEuler +description: "At the end of 2021, openEuler entered the list of Openstack + official CI test operation systems. 
In this blog, Linaro Engineers Kevin Zhao + and Xinliang Liu talk about the work involved in making this happen. " +date: 2022-01-20T09:49:21.000Z +image: linaro-website/images/blog/openstack-special-interest-group +tags: + - ci + - open-source +author: joyce-qi +related: [] + +--- + +During the last day of 2021, the openEuler OpenStack SIG delivered a 2022 New Year’s gift to the developers in the OpenStack and openEuler open source communities: openEuler has successfully entered the list of OpenStack official CI test operation systems, and [DevStack, the most widely used by OpenStack developers, officially supports openEuler](https://review.opendev.org/c/openstack/devstack/+/760790)! + +openEuler is an open source Linux distribution platform based on CentOS. In this blog, Linaro Engineers Kevin Zhao and Xinliang Liu talk about how [Linaro helped drive the work needed to get the OpenStack community CI to officially support openEuler](https://mp.weixin.qq.com/s/7nqjsrBoynAOfuwonyJ8Hg). + +![Image of OpenEuler Upstream Process](/linaro-website/images/blog/openeuler-image) + +### Upstreaming code and the need to guarantee quality on different platforms + +When developing upstream software locally, developers usually select the corresponding architecture, hardware, operating system and other related software and hardware first based on their target scenarios. After completing local development and verification, they will submit the code to the upstream community. The upstream community will then usually provide an automated CI verification mechanism which comprehensively verifies the code submitted by the developer. Only after the code has been verified and approved by the upstream community maintainer can it be incorporated into the upstream community mainline. + +In order to ensure the quality of openEuler, we needed to push it into the upstream community as part of the CI verification mechanism. Without this verification mechanism, there would be no guarantee as to the quality of the development activities carried out on software and hardware. To further support testing of different types of hardware and operating systems, the openEuler community also released software packages suitable for different platforms and related usage and migration guidance for the upstream community. These tools provide users on different platforms with convenient and reliable solutions, which greatly facilitate the use of openEuler. + +### How it all started - OpenStack agrees to support openEuler + +Let’s review how the OpenStack upstream community worked to support openEuler. At the China Open Source Hackathon in Q4 2020, OpenStack and openEuler developers from Huawei and Linaro discussed the possibility of OpenStack to support openEuler. After two days of onsite development, they completed the POC prototype verification and demonstrated the achievement, which proved the basic usability of OpenStack + openEuler. + +![China Open Source Hackathon](/linaro-website/images/blog/china-open-source-hackathon) + +In early 2021, developers from Huawei, Linaro, Unicom Digital and China Telecom established the OpenStack SIG in the openEuler community, dedicated to better combining the two OpenStack and openEuler open source communities to provide users with an open and reliable cloud infrastructure stack. 
Linaro developers undertook the task of promoting openEuler support in the OpenStack upstream community, and officially opened related technical discussions, which involved the Infra SIG and Multi-Arch SIG‘s reports and discussions in the OpenStack community. The community began to recognize the influence and activity of openEuler in the field of operating systems, the open governance of the openEuler community, and the technical capabilities of the members of the openEuler OpenStack SIG. After careful consideration, the OpenStack community agreed to provide openEuler support, with plans to support x86 and aarch64 multi-architectures. + +![China Open Source Hackathon image 2](/linaro-website/images/blog/china-open-source-hackathon-image-2) + +### What have we achieved with openEuler so far? + +In the middle of 2021, Linaro engineer Xinliang Liu completed the [openEuler image build in the OpenStack upstream community](https://review.opendev.org/c/openstack/diskimage-builder/+/784363), which made the foundation for openEuler support. At the same time, developers from Huawei completed the related work of openEuler accessing the OpenStack upstream CI resource pool. Finally, the [introduction of openEuler into the OpenStack community was officially completed](https://zuul.opendev.org/t/openstack/job/devstack-platform-openEuler-20.03-SP2). Now the OpenStack upstream community not only has CI to guarantee the quality of openEuler, but users can also quickly deploy a set of OpenStack environments based on openEuler through DevStack. + +DevStack is an OpenStack rapid deployment kit officially developed by the OpenStack community. It is used to quickly build a complete OpenStack environment based on the latest version or specified version of git master. It is a necessary development kit for daily OpenStack developers, and all the CI tests of [all OpenStack projects are using DevStack to do the corresponding environment deployment](https://docs.openstack.org/devstack/latest/#quick-start). Now that DevStack supports openEuler, it not only provides a great help for the development work of OpenStack and openEuler developers, but also provides a technical foundation for the verification of the upstream CI of more projects in OpenStack on openEuler. + +At the same time, openEuler OpenStack SIG has completed the adaptation, verification and software packages of OpenStack core components of Queens, Rocky, Train, Victoria, Wallaby, etc. in multiple versions of openEuler 20.03 LTS, 21.03, 21.09, etc. [The release work](https://gitee.com/openeuler/openstack) provides openEuler users with easy-to-use and useful OpenStack software. In the future, we will continue to work to promote the integration and verification of openEuler by the main component communities in OpenStack,as well as the adaptation and tuning of each component on openEuler,and the integration with openEuler community innovation projects. + +![Openstack Special Interest Group](/linaro-website/images/blog/openstack-special-interest-group) + +The openEuler access to the OpenStack community is the cooperation of many developers from the two communities. I would like to express my gratitude to the contributors: + +Open Infrastructure Foundation: Clark Boylan、Ian Wienand、Jeremy Stanley、李昊阳、Rico Lin +OpenStack QA SIG: Dr. Jens Harbott、Radosław Piliszek, +openEuler OpenStack SIG: 陈锐、陈硕、黄填华、李昆山、李佳伟、Xinliang Liu(Linaro)、刘胜、王玺源、姚志聪、张迎、张帆、赵帅(Kevin Zhao Linaro)、郑振宇 + +### Where can I find out more about openEuler? 
+ +For more information on the work Linaro does on openEuler, please check : + +* Support OpenEuler in OpenStack Disk Image Builder:[https://linaro.atlassian.net/browse/EULR-10](https://linaro.atlassian.net/browse/EULR-10) +* Support openEuler in OpenStack Devstack and enable basic tempest test: + [https://linaro.atlassian.net/browse/EULR-11](https://linaro.atlassian.net/browse/EULR-11) +* openEuler website:[https://www.openeuler.org/en/](https://www.openeuler.org/en/) diff --git a/src/content/blogs/linaro-contributions-to-the-5-17-linux-kernel-release.mdx b/src/content/blogs/linaro-contributions-to-the-5-17-linux-kernel-release.mdx new file mode 100644 index 0000000..9cce75b --- /dev/null +++ b/src/content/blogs/linaro-contributions-to-the-5-17-linux-kernel-release.mdx @@ -0,0 +1,80 @@ +--- +title: Linaro contributions to the 5.17 Linux Kernel Release +description: In this blog we talk about Linaro's contributions to the 5.17 Linux + Kernel Release. +date: 2022-03-12T09:04:13.000Z +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +tags: + - linux-kernel + - open-source +author: linaro +related: [] + +--- + +# Introduction + +Last week the 5.17 Linux Kernel release took place. As always, Linaro featured in the top ten companies in terms of changesets and lines changed. + +![5.17 Most Active Employers](/linaro-website/images/blog/5.17-most-active-employers) + +So how did we end up in the top ten? We reached out to our Kernel Engineers to find out more about the work they did which landed us in the top ten. We have also taken a look at our testing contributions which help ensure the quality of the Linux kernel. + +# Kernel Contributions + +## Linus Walleij + +In v5.17 Linus developed structures for operating system-controlled battery charging using extended CC/CV algorithms, made the thermistor HWMON (Hardware Monitoring) driver generic and reusable by e.g. ACPI systems, fixed some problems in the Zinitix touchscreen driver, added reset line bindings to a few Arm PrimeCells and continued the cleanup work for the Intel XScale StrongARM IXP4xx platforms which will be concluded in the v5.18 kernel. + +## Dmitry Baryshkov + +In this release, Dmitry’s work concentrated on cleaning up the Qualcomm Display driver (MSM DRM). A significant number of patches were necessary to open the gates for the virtualized DRM planes ([LVC21F-108 Advanced KMS: virtualized planes as a way to hide hardware implementation details](https://resources.linaro.org/ru/resource/KdJRxQgh8NG3J4ssja9qHe)), which are expected to land in the 5.19 release. Other MSM DRM patches included the removal of old unsupported eDP (Embedded DisplayPort) code and other minor cleanups. Another set of patches added power domain code and PCIe PHY support for the Qualcomm Snapdragon 8 Gen 1 Mobile Platform (SM8450). Full support for the PCIe bus on this platform is expected in the 5.18 kernel. The final set of changes were rather minor fixes for the older Qualcomm platforms (APQ8096, MSM8916/8994/8996). + +## Vinod Koul + +Most of the work done in the kernel 5.17 release involved adding support for the Snapdragon 8 Gen 1 Mobile Platform (SM8450) platform, which is the latest Qualcomm SoC, announced in December 2021. These patches add clock, pinctrl, regulator, interconnect, IOMMU, UFS and USB driver support along with relevant device tree updates and support for the Qualcomm SM8450 Reference hardware platform (QRD). 
Vinod’s work on the Snapdragon 8 Gen 1 Mobile Platform also landed him in the list of most active developers for the 5.17 kernel release in terms of changed lines. + +![5.17 Most Active Developers](/linaro-website/images/blog/5.17-most-active-developers) + +## Sam Protsenko + +A portion of Sam’s patches were applied for WinLink E850-96 board support, as well as for the Samsung Exynos850 SoC. There is now minimal viable support for that board merged in the mainline kernel, which is enough to boot the E850-96 up to the serial console, using some rootfs as a RAM disk. + +Basic platform features like eMMC, watchdog, RTC, I2C, HS I2C, serial, etc, are already functional with this patch set. Some new drivers were added, like the Exynos850 clock driver, the USIv2 driver, etc. The work of course also includes some generic fixes and related additions to existing Exynos drivers and Device Tree bindings. + +## Arnd Bergmann + +Arnd reworked the architecture specific code backing the futex() system call to be more general, avoiding runtime detection of the feature that sometimes caused problems on 32-bit Arm systems. + +In the dmaengine subsystem, old code that used an incompatible method to describe the relation between a DMA engine hardware block and its client device was cleaned up. + +## Bjorn Andersson + +In addition to a few bug fixes, Bjorn's contributions to v5.17 consisted of a new driver for the Embedded DisplayPort PHY found in e.g. the Qualcomm 8cx platform and a PWM-chip implementation in the TI SN65DSI86 DSI/eDP bridge driver to be used for backlight control. Bjorn also picked up the maintainership of Qualcomm clock drivers. + +# Testing Contributions + +Linaro consistently ranks in the top ten companies when it comes to reviews, testing and reporting of regressions. We asked Linux Kernel Validation Engineer Naresh Kamboju to share some statistics on our contributions. + +## Reviewed-by + +Around 83 companies contributed their work to this kernel release and +Linaro secured [8th position in “Reviewed-by”](https://remword.com/kps_result/5.17_review.html). Linaro contributed 269 Reviewed-by tags, an 11% improvement from the previous release. (A rough sketch of how counts like these can be derived from git history appears at the end of this post.) + +![Reviewed by stats for 5.17 kernel release](/linaro-website/images/blog/reviewed-by-stats-5.17-kernel-release) + +## Tested-by + +Around 57 companies chose to contribute by testing patches that went into this kernel release. Linaro secured [7th position in “Tested-by”](https://remword.com/kps_result/5.17_test.html), having tested 34 patches - an 18% improvement from the previous release. + +![Tested by stats for 5.17 kernel release](/linaro-website/images/blog/tested-by-stats-5.17-kernel-release) + +## Reported-by + +Around 62 companies reported regressions in this kernel release with Linaro securing 11th position \[3]. Linaro reported fewer regressions in the 5.17 release than in the 5.16 release and is continuously working to improve its capabilities in reporting early build and test regressions through [Linux Kernel Functional Testing (LKFT)](https://lkft.linaro.org/) - a Linaro project. + +![Reported by stats for 5.17 kernel release](/linaro-website/images/blog/reported-by-5.17-kernel-stats) + +# Conclusion + +As the statistics in this blog show, Linaro’s engineers continue to make an impact in advancing the Arm software ecosystem through feature enablement, testing and maintenance. To find out more about Linaro’s role in the Linux kernel, check out our [Upstream Maintainership project page](https://linaro.atlassian.net/wiki/spaces/UM/overview).
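+
+As mentioned above, counts like these can be approximated straight from the git history of a mainline kernel checkout. The following is only a rough sketch: the repository path, release tags and the `@linaro.org` match are assumptions, it counts commits rather than individual tags, and the official statistics are computed more carefully.
+
+```python
+import subprocess
+
+def commits_with_trailer(repo: str, trailer: str, old: str, new: str) -> int:
+    """Count non-merge commits between two tags carrying a Linaro trailer."""
+    log = subprocess.run(
+        ["git", "-C", repo, "log", "--no-merges", "--oneline",
+         "--extended-regexp", f"--grep={trailer}: .*@linaro\\.org",
+         f"{old}..{new}"],
+        capture_output=True, text=True, check=True,
+    ).stdout
+    return len(log.splitlines())
+
+if __name__ == "__main__":
+    for trailer in ("Reviewed-by", "Tested-by", "Reported-by"):
+        print(trailer, commits_with_trailer("linux", trailer, "v5.16", "v5.17"))
+```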
diff --git a/src/content/blogs/linaro-contributions-to-the-5-18-linux-kernel-release.mdx b/src/content/blogs/linaro-contributions-to-the-5-18-linux-kernel-release.mdx new file mode 100644 index 0000000..9eb4f13 --- /dev/null +++ b/src/content/blogs/linaro-contributions-to-the-5-18-linux-kernel-release.mdx @@ -0,0 +1,46 @@ +--- +title: Linaro contributions to the 5.18 Linux Kernel Release +description: "In this blog we talk about Linaro's contributions to the Linux + kernel 5.18 release. " +date: 2022-06-01T09:22:01.000Z +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +tags: + - linux-kernel + - open-source +author: linaro +related: [] + +--- + +The 5.18 Linux Kernel release took place at the end of May. As always, Linaro featured in the top ten companies in terms of changesets and lines changed (based on statistics [pulled together here by LWN](https://lwn.net/Articles/895800/)). + +![Most active linux kernel 5.18 employers](/linaro-website/images/blog/linux-kernel-5.18-release-active-employers) + +We asked our Kernel Engineers to talk about their contributions which helped land Linaro in the top ten. + +## Linus Walleij + +Linus Walleij featured in the top ten list for most active 5.18 developers in terms of changed lines. +He has been working on finalizing the conversion of the SPI (Serial Peripheral Interface) subsystem to use GPIO (General Purpose Input/Output) descriptors exclusively, prompted by the recent re-use of early 2000s Samsung SPI IP in the Tesla Full Self Driving (FSD) computer. The IXP4xx platform was stepwise moved to device tree and multiplatform support (finalized for v5.19). Linus contributions also involved several improvements to the battery charging code in the kernel, where the kernel needs to handle battery charging tasks normally handled by an autonomous ASIC, leading up to paying back technical debt left behind since the early days of device tree support in 2013. + +![Most active linux kernel 5.18 developers](/linaro-website/images/blog/linux-kernel-5.18-most-active-developers) + +## Krzysztof Kozlowski + +Krzysztof Kozlowski was working on converting Devicetree bindings to DT schema format where he reached almost full coverage of the Samsung Exynos SoC bindings with the new format. With that came a lot of fixes and corrections for DTS files (not only Samsung Exynos), mostly pointed out by the schema itself. Krzysztof also squashed a few bugs in different drivers and the NFC stack, and improved the code quality of some other pieces. Beside patches, Krzysztof also performed many reviews as a co-maintainer of the Devicetree bindings. + +## Arnd Bergmann + +Arnd Bergmann contributed two important sets of cleanup patches: The set\_fs()/get\_fs() interfaces in the kernel are now [removed](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=16477cdfefdb494), completing [work started by Christoph Hellwig](https://lwn.net/Articles/832121/). These date back to linux-0.10 from 1997 and previously led to a class of security bugs when used incorrectly. The code was already eliminated from x86 and arm architectures but is now gone from all architectures. On the 32-bit Arm architecture, the final three platforms now use the generic interrupt entry code. 
This was a prerequisite for [a series by Ard Biesheuvel](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/arch/arm/kernel?id=9c0e6a89b59) to enable the use of separate IRQ stacks across all Arm machines for improved reliability, as well as helping with the multiplatform conversion patches that are [now queued](https://lore.kernel.org/linux-arm-kernel/CAK8P3a3gqQbZG5gdh_cRmGx8B6XR8CGYcXN7wMu-YmCBwD1wGQ@mail.gmail.com/) for linux-5.19. + +## Vinod Koul + +Vinod Koul contributed towards the Qualcomm Snapdragon SM8450 SoC by adding support for HDK board, interconnects and enabling driver configs. GPI DMA support in I2C driver was also added along with support for GPI DMA for Qualcomm Snapdragon SDM845. Wearing his maintainer hat there were subsystem changes for dmaengine and phy subsystems too. + +## Shawn Guo + +Shawn Guo mostly contributed a driver for the Qualcomm MSM Power Manager (MPM), which can be found on a few Snapdragon SoCs based on the Resource Power Manager (RPM) architecture like MSM8939, SDM660 and QCM2290. With this irqchip driver, it becomes possible to wake up these SoCs from VddMin low power state. + +## Conclusion + +The engineers featured in this blog were asked to contribute content based on the fact that they had either contributed a significant number of patches or reviewed patches in the 5.18 kernel release. This only goes to show how Linaro’s engineers continue to make an impact in advancing the Arm software ecosystem through feature enablement, testing and maintenance. You can find out more about Linaro’s role in the Linux kernel by checking out our [Upstream Maintainership project page](https://linaro.atlassian.net/wiki/spaces/UM/overview). diff --git a/src/content/blogs/linaro-contributions-to-the-linux-kernel-5-16-release.mdx b/src/content/blogs/linaro-contributions-to-the-linux-kernel-5-16-release.mdx new file mode 100644 index 0000000..7cc0a4c --- /dev/null +++ b/src/content/blogs/linaro-contributions-to-the-linux-kernel-5-16-release.mdx @@ -0,0 +1,61 @@ +--- +title: Linaro contributions to the Linux Kernel 5.16 Release +description: > + In this blog, we asked the Linaro developers to talk about the contributions + and impact they made to the Linux kernel 5.16 release. Read about the release + here. +date: 2022-01-18T09:36:32.000Z +image: linaro-website/images/blog/careers-image-1 +tags: + - linux-kernel + - arm +author: linaro +related: [] + +--- + +The 5.16 kernel release was published last week and featured several Linaro developers in the top twenty contributors to the Linux kernel (as highlighted in [LWN.nets development statistics](https://lwn.net/Articles/880699/)) - both by changesets and changed lines. + +![5.16 Kernel Release Most Active Developers](/linaro-website/images/blog/5.16-kernel-release-active-developers) + +In this blog we asked the Linaro developers featured to talk about the contributions they made to the Linux kernel 5.16 release. + +### Arnd Bergmann - Randomized kernel configurations for the purpose of regression testing + +Arnd spent some time on building randomized kernel configurations for the purpose of regression testing. As a result of this, there are over 80 bugfixes from him in linux-5.16, about half of those for Arm specific code and drivers, and the others spread around all subsystems of the kernel. + +Most of his other contributions this time were for merging branches from downstream maintainers. 
5.16 was an unusually large release, and at the time of release there were over 100 branches and over 1000 non-merge commits that Arnd forwarded for inclusion in the mainline kernel. Detailed information about the merged contents is as usual available in the pull request messages, for linux-5.16 SoC contents see [https://lore.kernel.org/linux-arm-kernel/CAK8P3a2FokRce-oN3dRJPihmDPWuqgWfWg1FNG6WKpWiUa4eNQ@mail.gmail.com/t/](https://lore.kernel.org/linux-arm-kernel/CAK8P3a2FokRce-oN3dRJPihmDPWuqgWfWg1FNG6WKpWiUa4eNQ@mail.gmail.com/t/) + +In comparison, the 5.17 merge cycle has started with less than 800 Arm SoC specific patches, but it does include a number of new SoCs including the recently announced Snapdragon 8 Gen 1. For details about the coming contents of 5.17, see: + +[https://lore.kernel.org/linux-arm-kernel/CAK8P3a0RDZpLtWjMEU1QVWSjOoqRAH6QxQ+ZQnJc8LwaV7m+JQ@mail.gmail.com/t/](https://lore.kernel.org/linux-arm-kernel/CAK8P3a0RDZpLtWjMEU1QVWSjOoqRAH6QxQ+ZQnJc8LwaV7m+JQ@mail.gmail.com/t/) + +### Dmitry Baryshkov - Refactoring of interconnect drivers for Qualcomm platforms + +The largest part of Dmitry’s contributions during this cycle is related to refactoring for the interconnect drivers for some of the Qualcomm platforms (sdm660, msm8916, msm8939). These changes have generified support for QoS (Quality of Service) settings for the in-chip bandwidth management. This improves support and performance of older (but still used) platforms. Dmitry’s other contributions were mostly concentrated around the Qualcomm Display driver (MSM DRM), Qualcomm clock and MPP (multi-purpose pin) controllers. + +### Srinivas Kandagatla - Support for Qualcomm "AudioReach'' signal processing framework + +Srinivas’ contributions are related to Qualcomm audio drivers support. In particular the support for the Qualcomm "AudioReach'' signal processing framework was merged in Linux 5.16. This new framework is an integral part of Qualcomm next generation audio SDK and this will be deployed on all new Qualcomm chipsets, such as the recently announced [Snapdragon 8 Gen 1 mobile platform](https://www.qualcomm.com/products/snapdragon-8-gen-1-mobile-platform). Upstream support to this framework makes use of ASoC Topology to load graphs on to the DSP (digital signal processor) which is then managed by APM (Audio Processing Manager) service to prepare/start/stop. This should also provide end users more flexibility to build graphs as per the use case and include vendor specific DSP modules.In addition to the support for AudioReach, Srinivas contributed various fixes in legacy Qualcomm Audio Codecs drivers. + +### Shawn Guo - Adding the initial Qualcomm [QCM2290](https://www.qualcomm.com/products/qcm2290) chipset support + +Shawn’s main contribution to the 5.16 Linux kernel release involved adding the initial Qualcomm [QCM2290 ](https://www.qualcomm.com/products/qcm2290)chipset support. This entry-level SoC is newly introduced by Qualcomm as a cost-effective solution for retail point-of-sale (POS), industrial handheld, tracking and camera applications. As a result of Shawn’s work, drivers for QCM2290 Clock, Pinctrl, Regulator and USB PHY are available with the 5.16 kernel, and the kernel should be booting on the platform with a simple device tree. Along the way of adding QCM2290 support, Shawn also fixed several device tree bindings and DTS related to QMP PHY support. 
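+
+As a side note, binding and DTS fixes like the ones described above are normally validated with the kernel's own `dt_binding_check` and `dtbs_check` make targets. The snippet below is purely illustrative: it assumes a kernel checkout in `./linux`, an aarch64 cross-compiler and the dtschema package installed, and the schema path is only an example.
+
+```python
+import subprocess
+
+KERNEL = "linux"  # path to a kernel source checkout (assumption)
+SCHEMA = "Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml"  # example path
+
+def make(*targets: str) -> None:
+    """Run kernel make targets for an arm64 cross build."""
+    subprocess.run(
+        ["make", "-C", KERNEL, "ARCH=arm64",
+         "CROSS_COMPILE=aarch64-linux-gnu-", *targets],
+        check=True,
+    )
+
+# Validate a single binding document against the devicetree meta-schema.
+make("dt_binding_check", f"DT_SCHEMA_FILES={SCHEMA}")
+
+# Build the arm64 DTBs and validate them against the binding schemas.
+make("dtbs_check")
+```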
+ +![5.16 Kernel Release Top Signoffs](/linaro-website/images/blog/top-signoffs-in-5.16) + +### Bjorn Andersson - Qualcomm SoC, remoteproc and rpmsg maintainership + +Bjorn's presence on the top signed-off-by list is the result of a good amount of community contributions related to the Qualcomm platforms, as well as the remoteproc and rpmsg frameworks. + +Among the changes in the Qualcomm space, device trees were introduced for Samsung Galaxy S4 Mini Value Edition, Xiaomi Mi 5, Xiaomi Mi Note 2, Sony Xperia XZ1 Compact, Sony Xperia XZ Premium, Sony Xperia XZ1, two revisions of Google Homestar, Google Herobrine, Sony Xperia 10 III and the Fairphone FP4. + +The Qualcomm Snapdragon 410 (MSM8916) got cleaned up, improved and now supports running in 32-bit mode as well. Qualcomm IPQ6018 got USB support, Asus Zenfone 2 Laser got sensor, touchscreen and uSD-card support. Among other things, the Snapdragon 7c Gen 3 got PCIe, QFPROM, GPU, QSPI, IPA, interconnect, coresight and USB support. The Qualcomm Snapdragon 660 SoC got video encoder/decoder support. The Qualcomm Snapdragon 835 Mobile Platform (MSM8998) got GPU and the Qualcomm Snapdragon 845 SoC got LMh support. Qualcomm Snapdragon 855 (SM8150) got FastRPC, as did Qualcomm Snapdragon 888 (SM8350), which also got cluster idling support. + +Support for the new GPR protocol, used by Qualcomm's new audio stack, a driver for exposing platform sleep statistics, SMP2P feature negotiation were introduced as well, as was, additional platform support in the RPM power-domain, LLCC and socinfo drivers. + +In remoteproc support for controlling the Mediatek MT8195 SCP, Meson AO ARC, Qualcomm Snapdragon 7c Gen 3 modem co-processors was introduced, along with a range of cleanups, general improvements and bug fixes. Similar cleanups and general improvements were seen in the rpmsg subsystem. + +![5.16 Kernel Release Most Active Employers](/linaro-website/images/blog/5.16-kernel-release-active-employers) + +Linaro as an employer featured sixth by changesets and seventh by lines changed. These statistics demonstrate the crucial role Linaro’s highly skilled kernel developers continue to play in maintaining and improving the Arm software ecosystem. diff --git a/src/content/blogs/linaro-developer-cloud-kubernetes-as-a-service.mdx b/src/content/blogs/linaro-developer-cloud-kubernetes-as-a-service.mdx new file mode 100644 index 0000000..1781471 --- /dev/null +++ b/src/content/blogs/linaro-developer-cloud-kubernetes-as-a-service.mdx @@ -0,0 +1,69 @@ +--- +title: Linaro Developer Cloud Supports Kubernetes as a Service +description: Linaro Developer Cloud has supported Kubernetes as a Service, and + we have finally passed all the conformance tests. Read more here. +date: 2020-07-24T12:55:00.000Z +image: linaro-website/images/blog/code_highway_under_2mb +tags: + - arm + - datacenter +related_projects: + - CLOUD +author: kevin-zhao +related: [] + +--- + +## **Linaro Data Center Group (LDCG)** + +The Cloud Infrastructure team in Linaro sits inside a group known as the Linaro Data Center and Cloud Group (LDCG). The Cloud Infrastructure focuses on open-source cloud IAAS, PAAS, and storage projects such as OpenStack, Kubernetes, and Ceph. The rest of LDCG spend their time working with Arm Server Architecture, Big Data, and HPC (High-Performance Computing). 
+ +## About Linaro Developer Cloud + +Linaro Developer Cloud is designed to broaden the availability of the latest hardware to developers globally and to enable commercial and private cloud providers to utilize the implementation to accelerate deployment of their own offerings. + +Linaro Developer Cloud is based on OpenStack, Ceph and Kubernetes, leveraging both Debian and CentOS as the underlying cloud OS infrastructure, on top of Arm-based server platforms from Linaro members such as Huawei and Marvell. + +![class=small-inline left Openstack icon](/linaro-website/images/blog/openstack) + +Linaro Developer Cloud has been certified as an [OpenStack Powered Cloud](https://www.openstack.org/marketplace/public-clouds/linaro/linaro-developer-cloud) for more than three years, and we are engaged in OpenStack and Ceph upstream to make OpenStack and Ceph easy to run and deploy on Arm64 platforms. + +Recently, Linaro Developer Cloud added Kubernetes as a Service, and we have now passed all the CNCF conformance tests to be listed as “[Certified Kubernetes - Hosted](https://landscape.cncf.io/card-mode?selected=linaro-developer-cloud-kubernetes-service)”, alongside well-known cloud offerings such as AWS EKS, Azure AKS and Huawei Cloud CCE. + +![LDC Kubernetes Service makes it easy for you to deploy, manage and scale Kubernetes clusters to run containerised applications on the Arm64 platform](/linaro-website/images/blog/ldc-kubernetes-service) + +## Kubernetes Service in Linaro Developer Cloud + +Kubernetes services are increasingly popular among cloud providers, as they give users the flexibility to spin up a Kubernetes cluster for their daily development and testing. It was therefore important for us to support this on the open-source Arm64 platform. + +The overall architecture for adding Kubernetes is shown below. There are changes on both the infrastructure (OpenStack) side and the Kubernetes cluster side. + +![Kubernetes Architecture](/linaro-website/images/blog/kubernetes-architecture) + +We have leveraged three important OpenStack services: + +**Magnum**, which is the provisioning and life cycle management service for Kubernetes clusters. + +**Octavia**, which provides the network load balancing service for applications that run on the Kubernetes cluster. + +**Heat**, which is the orchestration service used by Magnum to configure certificates, networks and security groups, and to provision the VMs and storage needed to launch Kubernetes. + +On the Kubernetes side, the cluster also needs to utilize the volume, load balancer and authentication support from OpenStack. Some important controllers have been integrated, as described below: + +**K8s-keystone-auth:** This controller provides authentication and authorization from OpenStack Keystone to the Kubernetes cluster. + +**Cinder-CSI-driver:** This controller connects to Cinder to provide volume support for Pods running inside the Kubernetes cluster. + +**Octavia-ingress-controller:** This controller talks to the Octavia load balancing service to expose applications to the outside world. + +With these components in place, users can easily launch a Kubernetes cluster with one simple API call or via the Web UI.
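+
+As a rough illustration of that single call, a cluster can be requested through Magnum's OpenStack client plugin. This is only a sketch: it assumes the usual `OS_*` credential variables are exported and that a cluster template named `k8s-arm64` and a keypair named `mykey` already exist; all names are placeholders.
+
+```python
+import subprocess
+
+def openstack(*args: str) -> str:
+    """Thin wrapper around the OpenStack CLI (python-openstackclient + magnumclient)."""
+    result = subprocess.run(["openstack", *args],
+                            capture_output=True, text=True, check=True)
+    return result.stdout
+
+# Ask Magnum to build a one-master, two-worker Kubernetes cluster.
+openstack("coe", "cluster", "create",
+          "--cluster-template", "k8s-arm64",
+          "--master-count", "1",
+          "--node-count", "2",
+          "--keypair", "mykey",
+          "demo-cluster")
+
+# Once the cluster reaches CREATE_COMPLETE, fetch a kubeconfig for kubectl.
+print(openstack("coe", "cluster", "config", "demo-cluster"))
+```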
+ +![Kubernetes Web UI](/linaro-website/images/blog/kubernetes-web-ui) + +For more detailed technical information, see our session “[Kubernetes as a Service - The Open Source Cloud on Arm64](https://ossna2020.sched.com/event/c3Yh/kubernetes-as-a-service-open-source-cloud-on-arm64-kevin-zhao-xinliang-liu-linaro)”, which was presented at the Open Source Summit North America 2020 virtual event. + +## CNCF Certified Kubernetes Service + +CNCF runs the Certified Kubernetes program for different vendors’ Kubernetes services to ensure their consistency and conformance. The software conformance test ensures that every vendor’s version of Kubernetes supports the required APIs, as do open source community versions. For organizations using Kubernetes, conformance enables interoperability from one Kubernetes installation to the next, and gives them the flexibility to choose between vendors. + +The Linaro Developer Cloud Kubernetes service currently offers Kubernetes version 1.17 on the Fedora-Coreos-Dev-Arm64 OS image. It has passed the CNCF conformance test suite, which includes more than 280 test cases, and was [certified by CNCF](https://landscape.cncf.io/card-mode?selected=linaro-developer-cloud-kubernetes-service). We are the first Arm64 open source cloud to pass the CNCF conformance test and gain this certification. With this certification, the Arm64 open-source platform is now a verified, interoperable option for users who need conformant Kubernetes. + +![class=small-inline left Certified Kubernetes logo](/linaro-website/images/blog/certified-kubernetes) diff --git a/src/content/blogs/linaro-developers-make-an-impact-in-linux-kernel-5-13-release.mdx b/src/content/blogs/linaro-developers-make-an-impact-in-linux-kernel-5-13-release.mdx new file mode 100644 index 0000000..44b6f10 --- /dev/null +++ b/src/content/blogs/linaro-developers-make-an-impact-in-linux-kernel-5-13-release.mdx @@ -0,0 +1,62 @@ +--- +title: Linaro Developers make an impact in Linux Kernel 5.13 release +description: "In this blog, we asked Linaro's kernel engineers who were featured + in the top twenty to talk about their contributions to the 5.13 kernel + release. " +date: 2021-07-01T01:23:22.000Z +image: linaro-website/images/blog/services_board +tags: + - linux-kernel + - open-source +author: linaro +related: [] + +--- + +The 5.13 kernel release was published last week and featured Linaro developers yet again in the top twenty contributors to the Linux kernel (as highlighted in [LWN.net's development statistics](https://lwn.net/Articles/860989/)) - both by changesets and changed lines. + +![Most active 5.13 kernel developers](/linaro-website/images/blog/5.13-kernel-stats) + +In this blog we asked the Linaro developers featured to talk about the contributions they made to the Linux kernel 5.13 release. + +### **Lee Jones - Fixing Compiler and Doc build warnings throughout the tree** + +For the past four out of five releases, Lee Jones was the top contributor for changesets. Fixing compiler and doc build warnings throughout the tree enables maintainers and testers to increase the warning level when test building their associated subsystems, leading to more issues being caught earlier on during the development process. When this work started, there were more than 20k level-1 (W=1) issues residing in the kernel. Now there are fewer than 2.5k. Lee will continue this work until there are as close to 0 as feasibly possible.
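+
+As an aside, a quick way to get a feel for W=1 numbers like the ones quoted above is to capture a build log (for example with `make W=1 ... 2> build.log`) and tally the warnings it contains. The script below is a small, illustrative sketch that only assumes GCC/Clang style diagnostics in the log.
+
+```python
+import re
+import sys
+from collections import Counter
+
+# Matches diagnostics such as:
+#   drivers/foo/bar.c:42:10: warning: unused variable 'x' [-Wunused-variable]
+WARNING = re.compile(r"warning: .+ \[(-W[\w=-]+)\]")
+
+def count_warnings(log_path: str) -> Counter:
+    counts: Counter = Counter()
+    with open(log_path, errors="replace") as log:
+        for line in log:
+            match = WARNING.search(line)
+            if match:
+                counts[match.group(1)] += 1
+    return counts
+
+if __name__ == "__main__":
+    totals = count_warnings(sys.argv[1])
+    print(f"total warnings: {sum(totals.values())}")
+    for flag, count in totals.most_common(10):
+        print(f"{count:6d}  {flag}")
+```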
+ +### **Arnd Bergmann - SoC tree work** + +Arnd is one of the maintainers responsible for merging all platform specific patches for Arm based machines into the mainline kernel, and [his pull requests](https://lore.kernel.org/linux-arm-kernel/CAK8P3a2RjRSjTcmwVf3VHy2CUB2HBj5AaJTx=1NSYuA_Qy4E-w@mail.gmail.com/T/#u) give a good overview of what is going on in this area. + +During the 5.13 merge window, we saw 863 patches from 195 developers, most of these are about changes to the device tree files of existing platforms that get improved by enabling more devices, cleanups and bug fixes. + +Support for 35 distinct new machines, and six new SoCs is added, which is slightly higher than normal. While in most merge releases there is still a similar amount of work going into 32-bit and 64-bit platforms, this time there is a strong bias towards 64-bit, something that is expected to continue over time, while Cortex-A7 and older 32-bit platforms will remain used on the low end. Three of the new SoC platforms stand out in particular, though in very different ways: + +Initial support for [Apple M1 platform](https://github.com/AsahiLinux/docs/wiki/SW%3ALinux) was added by Hector Martin through the SoC, giving hope for much more capable developer workstations to run Arm Linux in the future, once this support becomes complete enough to be included in distributions. The current state is fairly minimal, but a lot of the harder problems have been resolved and the remaining work is mainly about adding all the device drivers. + +Another noteworthy platform is at the opposite end of the spectrum, the STMicroelectronics STM32H750 microcontroller based on a Cortex-M7 with no MMU, along with support for a developer board. While most new development has moved away from Linux on MMU-less hardware, this is a reminder that machines like this still exist and can be put into productive work. + +Finally, Nuvoton WPCM450 is an older baseboard management controller that got merged through the OpenBMC project. While this is an older SoC based on the 20 year old ARM926 core, it remains popular enough in modern server systems to have developers interested in needing new software for it. + +### **Dmitry Baryshkov - Refactoring DSI PHY code** + +Dmitry is ranked fifth in “the lines changed” column thanks to refactoring one of the pieces of the Qualcomm Display driver (MSM DRM), which for a long time has been asking for tender love and care — the DSI (Display Serial Interface) PHY code. The DSI PHY is a hardware block found in most of the Qualcomm Snapdragon SoCs. It is responsible for physical communication between the SoC and MIPI (Mobile Industry Processor Interface Alliance) DSI display panels or bridges. Dmitry worked on cleaning the code responsible for setting up the MIPI DSI interfaces, managing DSI PHY PLLs and clocks, and removing duplicated code. While this contribution does not bring new features on its own, it provides a good background for future contributions both from other kernel developers and Dmitry. It won’t take long for new features to arrive: DSI PHY register snapshotting is expected to be merged in 5.14 while support for MIPI C-PHY mode (latest MIPI physical interface specification) and fixes for MIPI DSI continuous clock are both targeting the 5.15 Linux kernel. + +Another large contribution from Dmitry is the cleanup of Qualcomm clock drivers for the last SoC generations (sc7180, sc7280, sdm845, sm8180, sm8280 and sm8350), making the code more robust and easy to understand and maintain. 
+ +### **Robert Foss - Implementing Camera ISP support for Qualcomm Robotics RB3 Development kit / SDM845** + +Robert’s contributions can be divided into two separate parts; CAMSS, the Qualcomm Camera ISP driver, and platform enablement for the SM8350 Qualcomm SoC. + +The CAMSS driver contributions targeted adding support for the current generation of Camera ISPs used by Qualcomm SoCs. As well as adding support for this generation of the ISP design, the Qualcomm Robotics RB3 / SDM845 SoC had the CAMSS driver enabled, which will allow for basic camera sensor input. + +The SM8350 SoC enablement revolved around enabling peripheral IP blocks such as Thermal Sensors and the Pseudorandom Generator. + +For more information on the work Robert has done on upstreaming camera support for Qualcomm platforms, [check out this blog](https://www.linaro.org/blog/upstream-camera-support-for-qualcomm-platforms/). + +In addition to several Linaro engineers being featured, Linaro as an employer was also listed sixth for changesets and fourth for changed lines. + +![Most active 5.13 employers](/linaro-website/images/blog/kernel-5.13-employees) + +These stats showcase how Linaro continues to play an important role in maintaining and improving the Arm software ecosystem thanks to the hard work of its highly skilled kernel developers. + +To find out more about the work Linaro does in the Linux Kernel check out our [Upstream Maintainership Project](https://linaro.atlassian.net/wiki/spaces/UM/overview) where we track all our contributions. Or [contact us](https://www.linaro.org/contact/) for more information! diff --git a/src/content/blogs/linaro-developers-top-5-12-kernel-release.mdx b/src/content/blogs/linaro-developers-top-5-12-kernel-release.mdx new file mode 100644 index 0000000..cbe7c25 --- /dev/null +++ b/src/content/blogs/linaro-developers-top-5-12-kernel-release.mdx @@ -0,0 +1,63 @@ +--- +title: Linaro Developers top 5.12 Kernel release +description: In this article, we discuss the 5.12 kernel release, which was + published this week & featured Linaro yet again in the top five contributors + to the Linux kernel. +date: 2021-04-29T00:02:09.000Z +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +tags: + - linux-kernel + - open-source + - arm +author: linaro +related: [] + +--- + +The 5.12 kernel release was published this week and featured Linaro yet again in the top five contributors to the Linux kernel (as highlighted in [LWN.nets development statistics](https://lwn.net/Articles/853039/)). + +![List of 5.12 Most Active Employers](/linaro-website/images/blog/5.12-most-active-employers) + +In addition to several Linaro engineers being featured in the top twenty contributors to the 5.12 release by changesets and lines changed (which we will talk about later), Linaro also features in the lists for bug reporters (Arnd Bergmann) and Test and Review credits (Linaro assignee Linus Walleij). Code is only accepted into a code base once it has been reviewed and only a few people have the skills to review a core change and make a decision as to whether it can be incorporated or not. For Linaro to be featured in these lists highlights the significant role we play in detecting issues and limiting regressions. + +![Tables showing 5.12 Most Active Bug Reporters and Test and Review Credits](/linaro-website/images/blog/kernel-5.12-bug-reporters-and-test-and-review-credits-) + +As we mentioned earlier, several Linaro engineers feature as top contributors to the 5.12 kernel release. 
We thought it would be a good opportunity to highlight these engineers and find out what they have been working on. The contributions highlighted in this blog are only a subset of all the work Linaro does on the kernel, but they help showcase the role we play in maintaining and developing the Arm software ecosystem.

![List of 5.12 Most Active Developers](/linaro-website/images/blog/kernel-5.12-most-active-developers)

#### **Lee Jones - Fixing Compiler and Doc build warnings throughout the tree**

Lee Jones has been the top contributor by changesets in three of the past four releases. He continues to fix compiler and documentation build warnings throughout the tree, enabling maintainers and testers to increase the warning level when test building their subsystems, so that more issues are caught earlier in the development process. When this work started, there were more than 20k level-1 (W=1) issues in the kernel. Now there are fewer than 4k. Lee will continue this work until the count is as close to zero as is feasible.

#### **Arnd Bergmann - Deleting support for obsolete architectures**

Arnd Bergmann was top of the list for “lines changed” as a result of deleting support for several obsolete architectures and their associated drivers. The kernel supports around 70 32-bit Arm platforms and around 40 64-bit Arm platforms today, more than all other CPU architectures combined. A platform is usually a family of SoC designs from a single manufacturer that evolved from one original design, and each platform has between one and over 300 individually supported board designs. In most release cycles, support for one or two new platforms gets added, which can be for new chips that are still unreleased, or for older hardware that is gaining community support through its existing users.

However, not all platforms are supported in the long run, as the developers that initially added the support may move on to other projects, or the product line gets discontinued even before it makes it into users’ hands. In the 5.12 cycle, Arnd found 14 individual platforms that had not seen any notable work for five years or more, and asked the maintainers if those were still needed. Some of these are in active use and just work without changes, some platforms have seen renewed interest as a result of the query, and six platforms turned out to be completely abandoned by both users and developers. Removing these helps ensure that the code base remains relevant and that any ongoing maintenance is done on platforms that still have users.

The coming 5.13 cycle will include preliminary support for three new Arm platforms: the Apple M1 based on Armv8.5, the Nuvoton WPCM450 based on an Armv5TE core and a new Cortex-M7 based STM32 microcontroller, along with several other new SoCs getting added to the supported platforms.

#### **Viresh Kumar - Removing oprofile kernel code and taking OPP API to the next level**

Viresh Kumar is ranked third in the “lines changed” column by virtue of the amount of code he changed during the 5.12 cycle. He worked on a variety of topics, the most notable of which are the removal of the oprofile kernel code and taking the OPP API to the next level.

Oprofile is a statistical profiler for Linux systems, capable of profiling all running code at low overhead. Initially it used the kernel’s OPROFILE interfaces, but it has been using the perf interfaces for some time now.
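For readers less familiar with what “the perf interfaces” look like from user space, here is a minimal, hypothetical sketch (not taken from oprofile or any other tool mentioned here) that counts retired instructions for a small workload via the `perf_event_open()` system call:

```c
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;

	/* Ask for a hardware "instructions" counter, user space only. */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* perf_event_open() has no glibc wrapper, so call it via syscall(). */
	int fd = syscall(SYS_perf_event_open, &attr, 0 /* this process */,
			 -1 /* any CPU */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	volatile unsigned long sum = 0;
	for (unsigned long i = 0; i < 1000000; i++)
		sum += i;			/* the workload being measured */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t count;
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}
```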
The OPP (Operating Performance Points) framework is a helper library that provides a table of voltage-frequency pairs (with some additional information) for the kernel. Kernel frameworks, like cpufreq and devfreq, use these OPP tables to perform DVFS for devices.

During the review of a trivial patch by Viresh Kumar, Linus Torvalds [suggested](https://lore.kernel.org/lkml/CAHk-=whw9t3ZtV8iA2SJWYQS1VOJuS14P_qhj3v5-9PCBmGQww@mail.gmail.com/) removing the kernel OPROFILE support, as the "oprofile" user-space tools no longer use it, and have not for a long time. User space has been converted to the perf interfaces instead. Viresh took the suggestion to its next logical step and got rid of the kernel OPROFILE support.

Viresh also added a new API to the OPP core, dev\_pm\_opp\_set\_opp(), with which the OPP core can now be used to change the OPP of any type of device, not just CPUs. This required major changes to the structure of the OPP core (a minimal usage sketch is included at the end of this post).

#### **Bjorn Andersson - Introducing the first set of driver support for Qualcomm Snapdragon 8cx Compute Platform (SC8180x)**

Bjorn Andersson made it to the list of most active developers by changed lines through the introduction of a first set of driver support for the Qualcomm Snapdragon 8cx (aka SC8180x) laptop/compute platform. The largest pieces were the global clock controller and the top level mode multiplexer (TLMM) pinctrl, the latter of which included reworking the DeviceTree binding document to reduce duplication between similar Qualcomm platforms. This set of driver patches is the basis for the work done in the [aarch64-laptops project](https://github.com/aarch64-laptops/debian-cdimage), which aims to run standard Linux distributions on laptops such as the Lenovo Flex 5G.

#### **Srinivas Kandagatla - Adding SoundWire Audio support for Qualcomm Snapdragon 865 platform**

Srinivas Kandagatla is in the list of top twenty contributors by changed lines largely due to the support he added for SoundWire in the Low Power Audio Subsystem (LPASS) found on recent Qualcomm platforms, such as the Qualcomm Snapdragon 865. It enables support for external codecs like the Qualcomm Aqstic smart speaker amplifier (WSA881x), the Qualcomm Aqstic audio codec (WCD937x/WCD938x) or digital microphones, when using the upstream Linux kernel on Qualcomm devices such as the Qualcomm Snapdragon 865 mobile hardware development kit.

In addition, Srinivas made changes to the ASoC component core driver to improve bitfield handling, making drivers’ code more readable and less error prone; see the newly added functions snd\_soc\_component\_read\_field() and snd\_soc\_component\_write\_field().

Thanks to the relentless efforts of its Linux kernel developers, Linaro continues to play a critical role in maintaining and improving the Arm software ecosystem.

To find out more about the work Linaro does upstream, check out our [Upstream Maintainership project](https://linaro.atlassian.net/wiki/spaces/UM/overview) where we track all our contributions. Or [contact us](https://www.linaro.org/contact/) to find out more!
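As a footnote to the OPP work described above, here is a minimal, illustrative sketch of how a driver might use the new dev\_pm\_opp\_set\_opp() helper. It is not taken from the kernel tree; the device and target frequency are assumed to come from the calling driver:

```c
#include <linux/err.h>
#include <linux/pm_opp.h>

/*
 * Hypothetical helper: pick the lowest OPP at or above the requested
 * frequency and program the device to it. Works for any device with an
 * OPP table, not just CPUs.
 */
static int example_set_min_freq(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	int ret;

	/* Look up a matching OPP; 'freq' is rounded up to a real OPP. */
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* Configure the device (clocks, regulators, etc.) for that OPP. */
	ret = dev_pm_opp_set_opp(dev, opp);

	dev_pm_opp_put(opp);
	return ret;
}
```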
diff --git a/src/content/blogs/linaro-ecosystem-dashboard-a-centralized-information-hub-for-arm-developers.mdx b/src/content/blogs/linaro-ecosystem-dashboard-a-centralized-information-hub-for-arm-developers.mdx new file mode 100644 index 0000000..3659aec --- /dev/null +++ b/src/content/blogs/linaro-ecosystem-dashboard-a-centralized-information-hub-for-arm-developers.mdx @@ -0,0 +1,48 @@ +--- +title: Linaro Ecosystem Dashboard - A centralized information hub for Arm developers +description: This blog talks about Linaro's Ecosystem Dashboard - an information + hub which makes life easier for Arm developers. Read the blog to find out + more! +date: 2022-08-11T09:08:25.000Z +image: linaro-website/images/blog/code_ecosystem +tags: + - arm +author: linaro +related: [] + +--- + +One of Linaro’s main objectives is to ensure that open source projects are enabled for Arm. This allows engineers to develop natively on Arm which ultimately results in better Arm®-based products and a more seamless user experience for the end user. + +When developing natively on Arm it is important to get a view of which projects are supported on Arm platforms. Determining the status of various projects can end up being a time-consuming task for engineers who just need a quick answer. + +# Introducing the Linaro Ecosystem Dashboard + +This is why Linaro has created the [Linaro Ecosystem Dashboard (LED) project](https://ecosystemdashboard.linaro.org/), a central location developers can find information on open source projects and their status. The Linaro Ecosystem Dashboard enables developers to quickly search necessary software project details, including information on supported releases, build status, user stories and more. + +![Image of the Linaro Ecosystem Dashboard Webpage](/linaro-website/images/blog/led-image) + +To date there are 58 projects listed covering areas such as big data, databases, AI, virtualization, cloud, cloud native and storage. The plan is to expand beyond these areas to provide a comprehensive catalogue for the Arm status of open source projects. + +![Image of information listed for Apache Hadoop Project](/linaro-website/images/blog/apache-hadoop-image) + +By including latest information on as many projects as possible, the project aims to support Arm developers. As such, contributions from the community are essential! + +"Collaboration across the open source community is critical and it’s fantastic to see Linaro and its members working together to provide vital insights into the health of key open source software projects on Arm-based platforms. The Linaro Ecosystem Dashboard Project encourages and facilitates native development on Arm by giving software developers a central location to obtain information about latest releases, builds, potential issues and fixes for any given project." - Mark Hambleton, VP of open source software, Arm + +“It is very meaningful to have a platform like the Linaro Ecosystem Dashboard for Arm. It will benefit our open source community including users, developers and anyone else in the ecosystem. We are very honoured to be part of it and welcome more developers from the Arm ecosystem to join the project. Let’s work together to make our ecosystem better!” - Junping Du, General Manager of Huawei Cloud & Computing Open Source Business + +# How to contribute to the Linaro Ecosystem Dashboard + +Do you have a project you’d like to contribute updates for? All you have to do is go to the GitHub repo and submit a pull request. 
The Project will enable contributions from communities (under Apache-2.0 license) to assist in growing the platform dataset. + +A step-by-step guide to contributing to the Linaro Ecosystem Dashboard + +1. Go to [https://github.com/Linaro/ecosystemlandscape](https://github.com/Linaro/ecosystemlandscape) +2. Directory "\_post" contains all the sub-page info using yaml, so please refer to [this link](https://github.com/Linaro/ecosystemlandscape/pull/12) as a guide for how to fill in the content. +3. Fork the repo and submit to your repo +4. Create a Pull Request and wait for review + +![Example of how to fill out a project on the Linaro Ecosystem Dashboard](/linaro-website/images/blog/led-template-example) + +For more information on the Linaro Ecosystem Dashboard project go to [https://ecosystemdashboard.linaro.org/](https://ecosystemdashboard.linaro.org/). diff --git a/src/content/blogs/linaro-engineering-highlights-april-2020-new-version-2.mdx b/src/content/blogs/linaro-engineering-highlights-april-2020-new-version-2.mdx new file mode 100644 index 0000000..54a0505 --- /dev/null +++ b/src/content/blogs/linaro-engineering-highlights-april-2020-new-version-2.mdx @@ -0,0 +1,152 @@ +--- +title: "Linaro Engineering Highlights: April 2020" +description: Welcome to the April 2020 edition of the Linaro Engineering + Highlights. This roundup includes Linaro’s contributions to the Linux v5.6 + Kernel LTP, PSA Level 1 Certification Showcase, Linaro Consumer Group (LCG) + News, OTA article and some amazing research Linaro has been involved with in + the combat against COVID-19. +date: 2020-05-07T03:47:27.000Z +image: linaro-website/images/blog/code +tags: [] +author: jon-burcham +related: [] + +--- + +Welcome to the April 2020 edition of the Linaro Engineering Highlights. This is a roundup of all of the latest news and developments from last month including:- + +* Linaro’s Contributions to the Linux v5.6 Kernel LTP (Linux Test Project) +* PSA Level 1 Certification Showcase +* Linaro Consumer Group (LCG) News +* OTA article - Industrial Internet Consortium Journal of Innovation +* Protein Folding on Arm Devices - Helping with COVID-19 Research by Sahaj Sarup + +### Linaro’s Contributions to the Linux v5.6 Kernel + +###### **Mark Orvek, VP Engineering** + +![class=small-inline left core-eng.jpg](/linaro-website/images/blog/core-eng) + +As reported by LWN, the latest stable Linux Kernel (version 5.6) was released on March 29th. I wanted to highlight five notable Linaro related statistics from the article; you can read the full LWN article at [Some 5.6 kernel development statistics](https://lwn.net/Articles/816162/): + +* Linaro is the #2 company by number of lines changed and the #6 (known) company by changesets. +* Two Linaro employees (Arnd Bergmann and Srinivas Kandagatla) are #2 and #7 individual contributors by number of lines changed. +* Arnd Bergmann is the #6 individual contributor by changesets. +* Not mentioned in the article but two other key statistics + + * Linus Walleij 7th 1.6% Reviewed-by in 5.6 + * Naresh Kamboju ranked at #10 for reported-by

+ +**Most Active 5.6 Employers** + +| By Changesets | | | By Lines Changed | | | +| ------------------- | ---- | ----- | ------------------ | ----- | ----- | +| Intel | 1694 | 13.4% | Intel | 78083 | 11.5% | +| (Unknown) | 904 | 7.1% | Code Aurora Forum | 68538 | 10.1% | +| AMD | 781 | 6.2% | Linaro | 59492 | 8.8% | +| (None) | 778 | 6.1% | AMD | 44979 | 6.6% | +| SUSE | 713 | 5.6% | Red Hat | 40553 | 6.0% | +| Red Hat | 702 | 5.5% | (Unknown) | 28591 | 4.2% | +| Google | 558 | 4.4% | (None) | 27387 | 4.0% | +| Linaro | 503 | 4.0% | (Consultant) | 23271 | 3.4% | +| Huawei Technologies | 483 | 3.8% | Google | 20038 | 3.0% | +| Facebook | 298 | 2.4% | SUSE | 19274 | 2.8% | +| Mellanox | 252 | 2.0% | Facebook | 17525 | 2.6% | +| Renesas Electronics | 247 | 2.0% | Texas Instruments | 16561 | 2.4% | +| IBM | 232 | 1.8% | Mellanox | 14977 | 2.2% | +| Arm | 231 | 1.8% | Linux Foundation | 12289 | 1.8% | +| Code Aurora Forum | 222 | 1.8% | Marvell | 11678 | 1.7% | +| (Consultant) | 216 | 1.7% | Realtek | 10968 | 1.6% | +| Texas Instruments | 213 | 1.7% | Collabora | 9491 | 1.4% | +| NXP Semiconductors | 210 | 1.7% | NXP Semiconductors | 8689 | 1.3% | +| Oracle | 147 | 1.2% | Solarflare | 8670 | 1.3% | +| Broadcom | 143 | 1.2% | IBM communications | 8586 | 1.3% | + +**Most Active 5.6 Developers** + +| By Changesets | | | By Lines Changed | | | +| ------------------- | --- | ----- | ------------------- | ----- | ---- | +| Takashi Iwai | 406 | 3.2-% | Kalle Valo | 48483 | 7.2% | +| Chris Wilson | 306 | 2.4% | Arnd Bergmann | 29415 | 4.3% | +| Sean Christopherson | 143 | 1.1% | Jason A. Donenfeld | 18664 | 2.8% | +| Jérôme Pouiller | 125 | 1.0% | Ben Skeggs | 13471 | 2.0% | +| Eric Biggers | 122 | 1.0% | Greg Kroah-Hartman | 11931 | 1.8% | +| Arnd Bergmann | 114 | 0.9% | Chris Wilson | 10615 | 1.6% | +| Zheng Bin | 110 | 0.9% | Srinivas Kandagatla | 8739 | 1.3% | +| Geert Uytterhoeven | 103 | 0.9% | Alex Maftei | 8581 | 1.3% | +| Greg Kroah-Hartman | 103 | 0.8% | Maxime Ripard | 7521 | 1.1% | +| Masahiro Yamada | 94 | 0.7% | Peter Ujfalusi | 6970 | 1.0% | +| Colin Ian King | 92 | 0.7% | Tony Lindgren | 6320 | 0.9% | +| Ben Skeggs | 91 | 0.7% | Helen Koike | 5789 | 0.9% | +| Ville Syrjälä | 90 | 0.7% | Takashi Iwai | 5622 | 0.8% | +| Andy Shevchenko | 88 | 0.7% | Shuming Fan | 5604 | 0.8% | +| Russel King | 88 | 0.7% | Michal Kalderon | 5445 | 0.8% | +| Alex Deucher | 86 | 0.7% | Sricharan R | 5065 | 0.7% | +| Krzysztof Kozlowski | 82 | 0.6% | Andrii Nakryiko | 4857 | 0.7% | +| Thomas Zimmermann | 80 | 0.6% | Roman Li | 4852 | 0.7% | +| Jens Axboe | 77 | 0.6% | Thierry Reding | 4845 | 0.7% | +| Jani Nikula | 74 | 0.6% | Sunil Goutham | 4762 | 0.7% | + +Congratulations to Arnd, Srinivas, Linua and Naresh for being top contributors to the 5.6 kernel and a thank you to all those who keep Linaro in the top ten Linux Kernel contributors every release.


### LTP (Linux Test Project)

![class=small-inline left core-eng.jpg](/linaro-website/images/blog/core-eng)

Linaro had been asked by the Members to work on the Linux Test Project (aka LTP) and enhance it to cover all the syscalls in the Linux kernel. With best effort staffing, work had been progressing slowly. In January, Viresh Kumar (KWG) was able to take up this work and put in a sustained effort, quickly closing the gap and adding support for the following syscalls:

* `pidfd_open`
* `io_pgetevents`
* `fsopen`
* `fsconfig`
* `fsmount`
* `fspick`
* `open_tree`
* `move_mount`
* `clone3`
* `openat2`

All of the above have been merged, while work is in progress for three syscalls related to `io_uring`, supported by an Arm member engineer. In addition, twenty new syscalls, all related to the time64 variants, have been added to the task and are now underway. Further information is available in [KWG-326](https://projects.linaro.org/browse/LKQ-43).
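To give a flavour of the kind of interfaces these new tests exercise, below is a small, hypothetical user-space sketch (not an actual LTP test case) that uses `pidfd_open` to wait for a child process:

```c
#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434	/* Linux >= 5.3 on the unified syscall table */
#endif

int main(void)
{
	pid_t child = fork();
	if (child == 0) {		/* child: exit after a short nap */
		sleep(1);
		_exit(0);
	}

	/* Obtain a pollable file descriptor referring to the child. */
	int pidfd = syscall(__NR_pidfd_open, child, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* The pidfd becomes readable when the child terminates. */
	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
	poll(&pfd, 1, -1);
	printf("child %d has exited\n", (int)child);

	waitpid(child, NULL, 0);
	close(pidfd);
	return 0;
}
```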

+ +### PSA Level 1 Certification Showcase + +Linaro IoT and Embedded (LITE) + +Kevin Townsend, LITE Senior Engineer, completed certification of the TF-M integration with Zephyr. This effort was featured on the [PSA Certified website](https://www.psacertified.org/products/zephyr-project/). + +![class=small-inline left lite.jpg](/linaro-website/images/blog/lite) + +With the latest updates in TF-M and PSA, Kevin is working on a recertification with a Linaro Member board. We will post the news when that recertification is completed.

### **Linaro Consumer Group (LCG) News**

Tom Gall, Director LCG

This month the Linaro Consumer Group team released two blog posts highlighting work going on within the segment group and in coordination with our Member companies.

![class=small-inline left lcg.jpg](/linaro-website/images/blog/lcg)

The first blog discusses the current state of the HiKey and HiKey960 boards and their usefulness as Android Open Source Project (AOSP) development boards. John Stultz talks about the current state of support and how these devices are useful, valued members of the Android development ecosystem.

* [Update on HiKey/Hikey960 efforts in AOSP](/blog/update-on-hikey-hikey960-efforts-in-aosp/)

The second blog post is about how the effort to enable the SDM845 on the Dragonboard 845c grew into making mainline Linux kernel development possible on consumer form factor Android devices like the Pixel 3 and Poco F1. The efforts also highlight the effectiveness of the Android-5.4 GKI kernel and its ability to boot multiple devices from the same binary.

**[AOSP on Pixel3/PocoF1 (Running AOSP with mainline kernels on form factor devices)](/blog/aosp-on-pixel3-pocof1-running-aosp-with-mainline-kernel-on-form-factor-devices/)**

#### **OTA article - Industrial Internet Consortium Journal of Innovation**

Francois Ozog, Director LEDGE

![class=small-inline ledge.jpg](/linaro-website/images/blog/ledge)

LEDGE has a leadership role in the Over-The-Air (OTA) Special Interest Group in the Industrial Internet Consortium and was asked to author an article on OTA and Intelligent Transport Systems (ITS). It was [published](https://www.iiconsortium.org/news/joi-articles/2020-March-JoI-Why-Are-OTA-Updates-Needed-for-ITS.pdf) on March 27th on the Journal of Innovation web page after circulating for validation amongst LEDGE members.

The article lays out the challenges of OTA for current and future systems. OTA can have a significant impact in ITS: for instance, an update can increase peak power by 5% (resulting in 0-60mph in 2.9s instead of 3.2s). But it comes with many complexities not experienced in the mobile phone market, so many aspects of OTA in ITS need co-innovation and some form of standardization, as highlighted in the article.

#### **[Protein Folding on Arm Devices | Helping with COVID-19](https://www.96boards.org/blog/crunch-on-arm/)**

Research by Sahaj Sarup

![class=small-inline left 96boards-vertical-logo.png](/linaro-website/images/blog/96boards-vertical-logo)

Recently I have been spending my spare cycles, along with a few other friends from the Arm ecosystem, working to get the power and efficiency of the aarch64 ISA into the hands of researchers and institutes that have been working tirelessly to make sense of the COVID-19 pandemic.
diff --git a/src/content/blogs/linaro-engineering-highlights-december-2020.mdx b/src/content/blogs/linaro-engineering-highlights-december-2020.mdx new file mode 100644 index 0000000..1ea55c6 --- /dev/null +++ b/src/content/blogs/linaro-engineering-highlights-december-2020.mdx @@ -0,0 +1,410 @@ +--- +title: Linaro Engineering Highlights - December 2020 +description: In this edition of the Engineering Highlights, Jon Burcham takes a + detailed look at the Future of 32-bit Linux, Zephyr security, evolution of + Device Tree and more. +date: 2021-01-08T12:08:24.000Z +image: linaro-website/images/blog/code +tags: + - linux-kernel + - ai-ml + - iot-embedded + - testing +author: jon-burcham +related: [] + +--- + +## Introduction + +In this edition of Linaro's Engineering Highlights we have articles on the Future of 32-bit Linux, Zephyr security, the evolution of Device Tree, SystemReady IR and EBBR and more. + +## The Future of 32-bit Linux + +![Core Engineering icon](/linaro-website/images/blog/core-eng) + +To find an answer, it is worth taking a look at different types of systems supported in Linux today, how they have evolved over time with the introduction of 64-bit processors, why they remain popular, and what challenges these face today and in the future. [Continue reading](https://lwn.net/Articles/838807/) + +## Zephyr Security Update on Amnesia:33 + +**By David Brown, Linaro Security Working Group and Zephyr Security Architect** + +![Zephyr Project icon](/linaro-website/images/blog/zephyr-iot) + +On December 8, 2020, Forescout released a report containing numerous vulnerabilities found in various embedded TCP/IP stacks, known as [AMNESIA:33](https://www.forescout.com/company/blog/amnesia33-forescout-research-labs-finds-33-new-vulnerabilities-in-open-source-tcp-ip-stacks/). These vulnerabilities, across multiple network implementations, concern various memory and overflow errors, some of which are readily exploitable. [Continue reading](https://www.zephyrproject.org/zephyr-security-update-on-amnesia33/) + +## Device Tree Evolution and SystemReady IR + +**By Bill Mills, Linaro** + +![Device Tree.org icon](/linaro-website/images/blog/devicetree-logo_vertical-devicetree) + +#### **Introduction** + +This article will present one aspect of the Devicetree evolution project that is being worked on now: How does devicetree (DT) fit into the scope of SystemReady IR and what is the importance? This exploration is our current focus on the devicetree evolution calls. We hope to have agreement on this topic so work can begin in the April to October 2021PoR cycle. + +For good background information on devicetree and the devicetree evolution project, please see this white paper from 2019: [Linaro Devicetree Evolution](https://www.linaro.org/assets/pdf/Linaro-White-Paper--Device-Tree-Evolution.pdf). + +## SystemReady IR and EBBR + +SystemReady is a program from Arm to enable multiple operating system versions to “just work” on many Arm platforms. The key to these programs is to draw a well defined line between what is considered the operating system (OS) and what is considered the firmware and to ensure the interfaces between the two are stable from platform to platform and model year to model year. In this way old platforms (with old firmware) continue to work with new OSes and old OSes work with new hardware (as much as possible). + +Many of you will know of the efforts that have gone to make this work on Arm server platforms. This effort is based on UEFI and ACPI and is now called SystemReady SR. 
+ +SystemReady IR is a developing standard to do the same for smaller systems typically represented by mobile or embedded Arm systems running Linux or some other high level OS or hypervisor. SystemReady IR is based on UEFI and devicetree. In addition, SystemReady IR incorporates by reference the work of EBBR: the Embedded Base Boot Requirements. + +For more information, see Arm’s [SystemReady](https://developer.arm.com/architectures/system-architectures/arm-systemready) pages. + +## DT or ACPI + +Arm Server systems use ACPI to describe and abstract the platform hardware. ACPI works well for the server market but there are reasons to prefer devicetree for some types of systems. Some of these reasons are technical and some are more political. + +ACPI offers a strong ABI and standard stability. Once a given device type is known to ACPI, it should continue to operate with newer platforms or newer versions of the operating system. ACPI also offers some amount of abstraction for low level platform operations like placing a device into a low power mode. This abstraction works using byte coded functions embedded into the ACPI tables. ACPI works well for server platforms where the variation of device interconnectivity is low and power operations are infrequent. + +Device tree also has a formal specification but that specification outlines the form of the data not the exact schema of every device. A collection of schema for different device classes and specific vendor devices is built up over time and maintained as code. New schema (and the closely related concept of bindings) are peer reviewed and accepted by the maintainers using the standard kernel patch acceptance model. Device tree format is a pure data structure; it contains no code. The kernel must know how to use the data represented by a specific binding in order to use it. If your kernel does not know how to enable clocks on your SoC, you are out of luck. However, abstractions are possible using other standards like [SCPI](https://developer.arm.com/documentation/dui0922/g/css-system-control-and-power-interface--scpi-). SCPI allows DT to describe a desired device power state request to be a SCPI message with data provided by devicetree properties. + +The differences between the two formats directly lead to the advantages and disadvantages of each. DT is more flexible and can be quickly adapted to new situations but has a harder time providing strong ABI guarantees from kernel version to kernel version, year to year. ACPI provides more abstraction but some poor quality byte code implementations have led to the kernel not fully trusting the byte code to be thread safe or to be called 1000s of times per second. + +No matter your opinions on the weights attached to the issues raised above, the vast majority of non server Arm systems use devicetree today. Even as the scope of server-like systems extends out of the data center, there will always be systems smaller or more deeply interconnected than can fit the server model. For these reasons, many feel that non server-like systems will continue to use devicetree indefinitely. + +## Vertical OS vs Distro OS + +The point of SystemReady is to allow out of the box OSes to run on Arm platforms. This includes “Distro OSes” like Debian, Fedora, OpenSUSE etc. In this model all, the OS components like kernel, user space packages, and boot manager (often Grub) come from the distribution while the firmware comes from the platform provider. + +Classic Embedded Linux systems were examples of a “Vertical OS”. 
In this model all the firmware and OS components came from the system vendor. Often these OS images were only designed to work on a small subset of platforms. This gives the vendor a very high level of control, but the vendor must also take on the burden of maintaining all the components.

When firmware and OSes are SystemReady IR compatible, the Distro OS has a good chance of working out of the box. There are things to do to make this work more efficiently and more often.

Perhaps more importantly, SystemReady IR allows a new model: a vertical OS built from off-the-shelf distro components. In this model, the standard OS components like the kernel and user space come from a well-established distribution, but the vendor pretests combinations and signs off on them. In this way the vendor ensures that things always work but does not need to address each CVE for its own kernel and libraries.

Even if a given vendor chooses to stay with the vertical model, their burden is reduced if they build different systems with SoCs from different silicon providers. If each SoC vendor enables SystemReady IR, the work of the vertical OS vendor will be reduced as each SoC follows the same pattern.

## Is DTB part of the OS or part of the firmware?

In the Distro model, the devicetree binary data (DTB) needs to come from the firmware. In this way, the OS works if the kernel already understands the devices in the SoC, even if it has not been tested on this specific board. Perhaps this is a new SoC based on one the OS already knew, or just a new board with a known SoC. Even if the SoC is completely unknown, at least the OS will work well enough to send a message via the UART and may be able to use well defined peripheral controllers like the XHCI for USB.

However, today the DTB used for the kernel is most often built with that exact kernel. In this way, it is guaranteed that the DTB information matches what the kernel code expects. If the DT bindings have changed between kernel 5.4 and 5.10, the correct DTB will be used for each. In this model the DTB is considered part of the OS image.

Which model is correct? The answer should be “both”. We wish to always have the firmware supply the DTB in case the OS has no DTB info for the SoC or board. However, we also want to enable the OS to override the DTB if it thinks it knows better.

## DTB ABI testing (DTB as part of firmware model)

In theory, new kernels should work with older DTBs for most modern Arm SoCs. However, this is very rarely tested, and never formally. If we believe in this model, we need to start testing it. We need to test older DTBs with the latest kernel versions. How old is older? Ideally all LTS kernels in the past 6 years, but we need to start somewhere, so the idea is to start with current LTS - 1. For example, the Linux 5.11-rc releases should be tested against the 5.4 DTBs. We can’t test all platforms so we will start with a few that support the idea. A simple boot test will not be sufficient. We will need to quickly check out as many peripherals as possible, because this is the most likely area to break when the DTB info is incompatible.

Ideally old kernels should work with new DTBs also. If not, you could have one OS upgrade your firmware and it would break another OS that was previously working. This version of the test is probably a lower priority than the case above, so we suggest holding off on this testing until the first version has been worked out further.
## DTB Override by a Boot Manager (DTB as part of OS model)

In a vertical OS model, each kernel version has its own DTB files that are known to work with it. The firmware (often U-Boot) knows how to pick a kernel and to load and fix up the associated DTB file. Thus U-Boot is playing the role of platform firmware and also OS boot manager. It does all the work and knows the file layout of the kernel and DTB images.

However, in the Distro model, the distro will almost always use its own boot manager. (Grub is often used in this role, but others such as the syslinux family, systemd-boot, etc. are used as well.)

The presence of this OS boot manager causes several problems if it wants to override the DTB info. Only the boot manager knows which kernel it will choose, but only the firmware knows how to perform the DTB fixups. (DTB fixups include inserting the serial number, Ethernet MAC addresses, or the size of main DDR into the DTB.)

To resolve this issue, the DTE project is proposing a new UEFI API to the EBBR spec. This API can be used by the boot manager to request the firmware to perform the DTB fixups after it has loaded a new DTB. This API will be implemented in U-Boot. Grub will be enhanced to call this UEFI API if it loads a new DTB.

Grub will also be enhanced to measure the DTB into a Trusted Platform Module (TPM) if one is present and to verify the signature of the DTB it loads if secure boot is enabled. Upstream Grub has so far not accepted secure boot enhancements made by others, so the signature verification of the DTB may be another contention point.

### Conclusion

SystemReady IR can enable the best attributes of a vertical OS and an off-the-shelf distro OS, but some effort and coordination is required to make this real.

If you are interested in this work please subscribe to the [boot-architecture](https://lists.linaro.org/mailman3/lists/boot-architecture.lists.linaro.org/) mailing list. Our next DTE call will be in early January and will be announced on the mailing list.

## 2020 Year in Review

### Preface

**By Mark Orvek**

![Photo of Mark Orvek](/linaro-website/images/blog/mark-orvek)

Linaro has always been a distributed company and has known from its beginning how to work effectively and efficiently with individuals, teams and companies across more than 30 countries. This knowledge and experience have allowed us to continue to be productive even given the challenges of the pandemic. Later in this month’s update, the engineering teams will summarize some of the highlights from this year. Before you read their highlights, I want to share my thoughts on the coming year.

### Where are we headed?

We are not necessarily going in a different direction; rather, we will be working for better alignment across core engineering and segment groups. We continue to feel that collaborative engineering (solving common engineering challenges by working together) and direct participation in the various upstream communities are the best approach for solving complex technical problems. Over the past few months we have been opening Linaro projects, such as Trusted Substrate and Stratos, to everyone working in the Arm ecosystem. As always, Linaro members will control the project plans and direction. In addition, non-members will be able to participate in the project development. Many hands make the work go quicker and lighten the load on all. I have asked the engineering teams to ensure that all project plans include specific deliverables for each development cycle.
Some projects will take more than one cycle to complete; even so, each project needs to show demonstrable results along the way. Each project, as part of the Plan of Record (PoR) process, will determine the best way to achieve results in each cycle.

I wish everyone a very happy and healthy end of year and hope for a prosperous 2021.

## Building Fundamentals in 2020

**By Mike Holmes, Engineering Director, Foundational Technologies**

![Foundational Technologies Team Patches and Stewardship Table](/linaro-website/images/blog/building-fundamentals-2020)

### Articles

The [2020 OSPM summit](http://retis.sssup.it/ospm-summit/), which is a significant event for the Linux kernel world, was covered by Jonathan Corbet, publisher of [LWN](http://lwn.net). Jonathan wrote an article [“Imbalance Detection and Fairness in CPU Scheduler”](https://lwn.net/Articles/821123/) for the May 22, 2020 issue highlighting the work Vincent Guittot is driving within the KWG and the Linux community. Closer to home we published an [overview of VirtIO work](https://www.linaro.org/blog/virtio-work/) and [The Evolution Of The QEMU Translator](https://collaborate.linaro.org/display/EMR/Linaro+Engineering+Highlights+2020.07), both written by Alex Bennée. In August, Daniel Lezcano published two power management articles: the first was “[Using Energy Model to Stay in TDP Budget](https://www.linaro.org/blog/using-energy-model-to-stay-in-tdp-budget/)” and the second was “[Thermal Notifications with Netlink](https://www.linaro.org/blog/thermal-notifications-with-netlink/)”. Linus Walleij contributed a widely read article “[How the ARM32 Linux kernel decompresses](https://people.kernel.org/linusw/how-the-arm32-linux-kernel-decompresses)”, which was reprinted in the August 13, 2020 issue of LWN. In September, we wrote articles on [BFQ](https://www.linaro.org/blog/bfq-saved-me-from-thrashing/), [Force Idle When a CPU Is Overheating](https://www.linaro.org/blog/force-idle-when-a-cpu-is-overheating/), [Enabling UEFI Secure Boot on U-Boot](https://www.linaro.org/blog/enabling-uefi-secure-boot-on-u-boot/) and [OpenOCD at Linaro](https://www.linaro.org/blog/open-on-chip-debugger-ocd-at-linaro/). In November, a [sequence of articles on Arm32](https://www.linaro.org/blog/linaro-engineering-highlights-november-2020/) by Linus Walleij was published, with a wrap-up in December in the form of [an overview of the future of 32-bit Linux](https://www.linaro.org/blog/32-bit-linux-bright-future-or-end-of-life/).

## Linaro Consumer Group

**By Tom Gall, Engineering Director, LCG**

![Linaro Consumer Group icon](/linaro-website/images/blog/LCG)

### Premium Supported Developer Boards for Android

LCG consolidated its focus on the Member development boards that we had helped push into the AOSP master tree. We maintain these development boards in AOSP, keeping them up-to-date across Android 8, Android 9, Android 10, Android 11, and AOSP with kernel versions that include 4.4, 4.9, 4.14, 4.19, 5.4, 5.10 and mainline. At present, these boards are: Qualcomm’s Dragonboard 845c, HiSilicon’s HiKey and Hikey960, and TI’s X15.

These boards serve as a great vehicle for feature development (e.g. GKIv2), upstreaming, CI and validation for all things Android. They have been instrumental in the development and demonstration of new Android features. More details are in the following section.
+ +Our mainline-tracking activities on these boards have made sure that breakages between Android userspace and mainline kernels are found, reported and fixed as soon as the upstream kernels are released - naturally most of these are found during the merge windows. This also provided the validation paths needed for changes posted upstream. + +## Kernel Engineering for Android + +Kernel engineering for Android was largely concentrated on Android feature enablement and validation, dmabuf heap updates and keeping form-factor devices in sync with mainline kernels. + +As Android continues to evolve, the team focussed on enabling new Android features on the current devices - both the development boards and formfactor devices. GKIv2 was implemented on all the devices and continues to be validated. Relevant patches were posted upstream, keeping in line with the upstream-focus of the GKI effort. New features such as FBE, replacing ION with dmabuf heaps, and clk\_sync\_state were added and validated on the devices. + +After we merged dmabuf heaps into Linux mainline kernel as the replacement for ION, we implemented many features to bring dmabuf heaps closer to ION in terms of feature parity, and there are more patches in-flight. This work is ongoing as the enabler for vendors to move over to dmabuf heaps from ION in AOSP. + +ION has been dropped from mainline from v5.11 onwards, and dmabuf heaps are the way going forward. It was great to see the first patches outside of Linaro for SRAM dma-buf heap upstream. We continue to work with Members and vendors to improve this coverage. We also converted the Codec2 media framework in AOSP to use dmabuf heaps via the libdmabufheaps library which was newly written to allow AOSP users to transition to dmabuf heaps. + +To keep our form-factor devices synced with the mainline, we added more features, while pushing patches for upstreaming. As a result, Poco F1 now has its panel driver, accelerated touchscreen, Wifi, Bluetooth, and audio fully functional with minimal patches outstanding. Adding these features allows the devices to be more usable to us and increase our validation coverage. + +Our Android graphics open stack focus with the Dragonboard 845c continued, where we kept aosp/master libdrm, Mesa GL and drm\_hwcomposer in sync with the upstream with regular merges and fixes. + +In our other upstreaming efforts, we pushed fixes for the devices we manage. Android kernel debt reduction was largely focussed around GKI v2. It was also augmented by increased participation from Google engineers on upstreaming new features. We continue to maintain dmabuf, dmabuf heaps, drm\_hwcomposer and timekeeping, while being highly active as reviewers in the areas of interest. + +## Android Common Kernel Validation + +Android common kernel validation continued to be a strategic focus for the team. So far this year, we have run 402 Million tests, reported in 237 test reports sent through the year. A total of 422 kernels were tested. + +All the board/kernel/userspace combinations mentioned above as our premium supported development boards are under test. Test runs include regular runs of defined subsets of CTS and VTS that exercise the kernel, as well as periodic full CTS/VTS runs on the boards. +With these large numbers of tests being run weekly, average weekly triaging for issues has also increased. Test regressions were found and fixed in many VTS/CTS tests, around webview, networking, bluetooth, storage areas to name a few. 
Our test report format improved over the year, with additional data about flakey tests, regressions and total failures added based on feedback received. + +Community wise, we had an active year. We were highly active in the Linux Plumbers Android Microconference - both as organising committee members and presenters - driving key discussions and design decisions. John Stultz posted a couple of articles around cache handling and dma-api on LWN. We also did various demos at Linux Plumbers Conference (LPC) and Virtual Connect, notable ones including showcasing GKIv2 and mainline work on Poco F1. + +## AI Project + +Over this past year the Linaro AI Project has focused on impact to Servers, Edge and Microcontrollers. Some of accomplishments include: + +### Servers + +Linaro now hosts the community build for AArch64 TensorFlow. This is an achievement of the CI infrastructure and the engineering effort to overcome build dependencies. + +The Server perspective acknowledges that inferencing dominates the Machine Learning element of AI. By exploring the Training component of machine learning (ML), servers provide the added resource requirements for the ML Models to be built that inference relies upon. + +In addition to ML Frameworks, Servers explore novel approaches to AI to see how these can be applied to disaggregated/distributed computing environments. + +### Edge + +ArmNN/ACL made several releases through the course of the year. The Arm team collaborating through the AI Project took in patches to add a Python interface. Boost dependencies have started to be removed. Performance and other notable improvements happened through the course of the year. + +ONNX-RT/ONNX activity this year included the integration of ACL and then ArmNN as part of ONNX-RT. A number of performance improvements were authored by the NXP team that complement these integrations using the ONNX-RT on Arm. + +TVM is a deep learning compiler that supplies superior performance. This year saw many large performance improvements for Arm platforms. Those engaged on the project benefited from performance updates having visibility as improvements landed. TVMC was integrated into the project. This major contribution by the Arm team gives TVM a command line interface to compile, run, profile and tune models without having to author any python code. This helps to make TVM far more usable to those who aren’t experts in AI or TVM. ACL was also integrated into TVM. This toolbox of performance optimized kernels while not fully utilized yet will help to improve performance of inference on Arm. Ethos-N NPU support was also integrated into TVM. Efficient use of offload technologies on Arm is a key attribute of why companies are collaborating on TVM. + +### Microcontrollers + +The µTVM project was launched in coordination with the LITE segment group. The goal is to complete the vision bringing the superior effectiveness of AI Deep Learning compilers to Arm microcontrollers. The direct benefit to Members is the ability to add to their SDK product portfolio. Members collaborating/coordinating through the project increase their value by directly impacting the engineering priorities to align with their own. As the project has gotten off the ground, Zephyr and Mbed RTOS integration is one of the first achievements. The prototype code has started to evolve with the runtime and rpc mechanisms moving forward. There is plenty to do in 2021 on this project. + +TensorFlow Lite Micro is another important piece of the AI Project. 
As LITE launches their CI system, we will be integrating Tensorflow Lite Micro workloads into this CI. We will be working with Members through the next engineering cycle to identify Tensorflow Lite Micro engineering activities through LITE. + +### Linaro IoT and Embedded Group + +**By Vicky Janicki, Engineering Director, LITE** + +![Linaro IoT and Embedded Group](/linaro-website/images/blog/lite) + +Over the past year, the LITE team has been expanding the reach of several LITE contributing technology areas. Trusted Firmware for Cortex-M (TF-M) is a maturing codebase. Kevin Townsend (Linaro) has been contributing in various ways to reduce the barriers to successful use. To this end, he has contributed working sample applications showing realistic use cases with TF-M based authentication and certificate management. Kevin has contributed blogs and sessions at conferences such as Arm DevSummit as part of this effort. TF-M also can now be used out of the box with QEMU on Zephyr. Both David Vincze (Arm) and Andrei Gansari (NXP) were active contributors to the TF-M 1.1 (July) and TF-M 1.2 releases with board support and testing. + +![MCU Boot icon](/linaro-website/images/blog/mcuboot-logo) + +MCUboot, a secure RTOS bootloader, has been garnering more contributors in 2020. The release cycle sped up resulting in 3 releases in 2020 (1.5, 1.6 and 1.7). A significant milestone was the removal of the MCUboot tree within the TF-M build system and the move to use upstream MCUboot in the TF-M 1.2 release in November. This significantly reduces the amount of out of tree code. Out of tree features such as RAM Load, No Swap and Hardware Rollback support were also merged upstream. David Brown, as co-maintainer, led these efforts as well as speaking at general and security conferences. As a side note, David was invited to be a maintainer for Mbed TLS, a widely used cryptography library. + +With the added activity, MCUboot outgrew its home at JuulLabs and in November was migrated to a new home, mcu-tools, on github. A membership agreement and open governance charter for the project are available. A kickoff meeting for founding members is planned for January/February 2021. + +The team worked closely with the LAVA and Lab teams over the past year to develop innovative ways to integrate MCU’s into LAVA and the Linaro CI infrastructure. Because each MCU tends to be unique in multiple ways (inputs, core configurations, tools and software etc), finding common mechanisms and adding support has sometimes been difficult. The team is cleverly using Docker containers as well as updating LAVA itself to have a prototype running a commercially available Member board. We also are prototyping using Raspberry PI’s as dispatchers. Kumar Gala, LITE technical lead, and Paul Sokolovskyy (Linaro) have been herding this effort along, weeding and seeding throughout the year. + +Kumar, a senior Zephyr project maintainer ended the year as the #1 contributor across three releases - 2.2 (March), 2.3 (June) and 2.4 (September). In addition to his role as TSC member, Kumar has released 6 updates to the SDK. Erwan Gouriou (ST) was ranked #10. LITE team members added support for the Arm Musca S1, the NXP LPC55S and ST boards. This fall, Kevin began work on supporting uTVM on Zephyr, starting with building it within the Zephyr build system. 
## Linaro Edge and Fog Computing Group

**By François-Frédéric Ozog, Engineering Director, LEDGE**

![Linaro Edge Networking Group icon](/linaro-website/images/blog/ledge)

### Trusted Substrate

The project that started as Dependable Boot last year evolved into Trusted Substrate. The Dependable Boot code is still there and is a vital piece of Trusted Substrate, which aims towards SystemReady compliant firmware while adding a substantial amount of security to the chain of trust.

The majority of the goals defined at Linaro Connect 2019 Bangkok (BKK19) regarding the U-Boot EFI subsystem were achieved. Up to now, platforms could support either OP-TEE or securely stored EFI variables, since OP-TEE and StandAloneMM were mutually exclusive. Patches in U-Boot, OP-TEE and EDK2 have been merged which allow OP-TEE and StandAloneMM to coexist. Moreover, storing the EFI variables in an RPMB partition of an eMMC is also available, which allows small embedded devices without a flash in the secure world to protect their EFI variables against a variety of attacks.

Continuing our effort in Trusted Substrate, a number of features have been merged (or are in the process of merging) in U-Boot. Features like UEFI secure boot, Capsule Updates, the EFI RNG protocol and the EFI TCG2 protocol provide additional functionality and security, and greatly enhance the chain of trust. It’s worth mentioning that some LEDGE engineers became maintainers in projects to which they contribute.

Continuing the work on secure devices, we are working on secure, rollback-protected and brick-protected firmware upgrades, collaborating with Arm in defining the protocol and providing a proof of concept.

## LEDGE Reference Platform

The LEDGE reference platform has evolved substantially since last year. We now offer prebuilt images for a range of boards. Although the scope of the platform up to now was to provide a reference OS, we also provide prebuilt images that can be easily deployed on a range of platforms. Those images include, apart from the OS itself, an EBBR compliant firmware.

The reference platform consumed the majority of the work done upstream. As a consequence, we can now provide images that have EFI enabled and use an architecture agnostic way of loading an initramfs. We are currently working on adding the rest of the features, like TCG2 protocol support, using the firmware TPM which we now provide.

PARSEC (Platform AbstRaction for SECurity, an API to hardware security and cryptographic services) support was added, including its daemon and the required user-space libraries, which currently use our FirmwareTPM device. PARSEC integration required enabling the meta-rust layer, and the bitbake recipes we developed can be used as examples of packaging embedded applications written in the Rust programming language. We also solved the problem of fetching Rust application dependencies with the Cargo tool.

During integration of the Trusted Substrate work, we found and solved bugs:

1. The crypto signature was calculated incorrectly in UEFI U-Boot if the virtual machine was run with a different amount of memory. The fix was merged after the community review stage.
2. Depending on the virtual machine memory size, the initrd image could not be loaded. This was an issue with the communication protocol between UEFI U-Boot and the UEFI kernel stub, which allocates memory and copies the initrd there. The fix went into the 5.10 kernel.

We also added documentation for the LEDGE RP - the LEDGE User Guide and LEDGE Developer Howto (https://linaro.github.io/ledge-doc).
Documents are also generated during the Open Embedded build.

We removed the injection of the OP-TEE compatible node into the QEMU device tree. Previously we did that with the -dtb QEMU parameter, and later by patching QEMU. Now OP-TEE OS itself adds this node.

## QEMU BSA

Work on supporting the QEMU virtual machine as a reference machine for LEDGE/Trusted Substrate continued, with enhancements to QEMU for reboot and machine power down under secure boot. We made several proposals and sent these as patches to the QEMU mailing list:

* An sbsa watchdog for the QEMU virt platform matching the Linux kernel sbsa-gwdt driver. Maxim Uvarov’s patch was integrated with Sashi Mallela’s patch, and the combined patch was finally merged.
* For the LEDGE RP we enabled the wdt\_i6300esb PCI watchdog.
* A proposal to use the sbsa\_ec controller to reboot a QEMU secure virtual machine was sent as patches to the QEMU and Arm Trusted Firmware mailing lists. During discussion it emerged that a further sbsa\_ec improvement might break the virt platform, so the community decided to use a secure GPIO (pl061) to reboot/shut down a virtual machine from the secure world. Maxim is working on a new set of patches.

## CI

On the CI front we continued adding tests for the reference platform. Since the firmware development paved the way in the previous cycle, one major addition is the FWTS test suite running in LAVA.

## Community leadership

LEDGE has been active in promoting Trusted Substrate and the LEDGE Reference Platform as an Arm Cassini implementation to various organizations and at a number of events.

The most salient results are:

* Reference to Linaro and Trusted Substrate in the Industrial Internet Consortium [Distributed Computing in the Edge](https://www.iiconsortium.org/pdf/IIoT-Distributed-Computing-in-the-Edge.pdf)
* Industrial Internet Consortium Journal of Innovation article on [Over-The-Air updates in the automotive sector](https://www.iiconsortium.org/news/joi-articles/2020-March-JoI-Why-Are-OTA-Updates-Needed-for-ITS.pdf)
* Bright Talk [Trusted Substrate webinar](https://www.brighttalk.com/webcast/679/427036)
* Bosch webinar on [over-the-air updates for off-road machinery](https://bit.ly/3oXYVcQ)
* FOSDEM presentation on [XDP](https://archive.fosdem.org/2020/schedule/event/xdp_and_page_pool_api/)
* NetDev presentation on [page\_pool API and XDP](https://netdevconf.info/0x14/session.html?tutorial-add-XDP-support-to-a-NIC-driver)
* Arm DevSummit presentation on [LEDGE RP](https://devsummit.arm.com/agenda/?search=ledge#/)

## System Technologies

**By Ryan Arnold, Engineering Director, System Technologies**

2020 has been an extremely busy year for the Linaro System Technologies Group. Included below are the highlights of the impressive contributions of this team.

### Keep The Lights On - The Tip Of The Iceberg

In the last year, the Linaro STG team has resolved a stunning 800+ LSS tickets. This represents requests from across Linaro’s segment and working groups, projects where we provide services directly to Members, Linaro developer services, Linaro landing teams, Linaro community projects (such as Trusted Firmware), directed projects such as Morello, and our own internal needs. These 800+ tickets do not include software feature requests that are a part of the collection of open-source software that we created and continue to maintain.

### LTS Kernel Testing SLA - 100% of all LTS releases validated in less than 48 hours!
+ +Repeating the success from last year, the KV team and LSS teams have successfully validated all (100%) LTS releases in less than 48 hours, exceeding our goal of 80%. This took discipline and in some cases extreme effort to achieve. + +### Kernel Image Repacking (KIR) + +[KIR](https://github.com/Linaro/kir) is a tool that allows repacking of kernel images into boot images and/or rootfs images. KIR was written to eliminate the need to have a custom rootfs build for every LKFT LAVA job. Any LKFT job can run any kernel without additional modifications. This allows functional test bisections to be much easier to accomplish, and allows us to reduce the amount of time it takes to execute an LKFT build since we’re able to build the rootfs images out-of-band from the kernels being tested. + +## Reported By + +All the work we have done on LKFT tooling, process, and initiative of our reporting individuals comes together with successful reported-by and regression reports to both Greg KH, and Linus Torvalds. There have been many demonstrations of Linaro engineers expertly navigating the upstream bug reporting process successfully. This leads to the establishment of Linaro as experts in the area of Linux kernel testing. + +![SQUAD logo](/linaro-website/images/blog/squad) + +### Squad Client + +The Squad client was started as an effort to improve test report customization for the end users. As initially designed, server based reporting was difficult to use and hard to customize. Together with LKQ engineers, we came up with a simple, API-based, command line tool that allows users to produce customizable reports from the data collected in SQUAD. The tool is still in active development and is already used by Linaro Developer Services. The LKQ team is starting to make greater use of this project in their effort to improve kernel testing reports. + +### KissCache + +Linaro recently developed and open-sourced KissCache, a simple caching server built on the KISS principle: Keep It Simple and Short. Unlike classical proxies like Squid that transparently intercept traffic, in order to use KissCache one must explicitly prefix the requested URL by the URL of the local KissCache instance. KissCache will download the requested resource in the background while streaming it to the client. + +If many clients are requesting the same resource, KissCache will download it only once and stream the content to every client. When artefacts are hosted on a system where network bandwidth is charged per unit (such as Amazon S3), this can amount to several thousands of dollars in savings per month (as was the case in Linaro.) We’ve also seen this deployed at Linaro Member companies to similar success. + +### LAVAche + +LAVAche is an interesting solution to the problem of QEMU scalability in LAVA instances. Formerly QEMU instances were externally managed as LAVA dispatcher+DUT combinations that were provided directly to LAVA in the traditional method, i.e., statically allocated. LAVAche provides a way for LAVA to utilize cloud availability for running (and scaling) QEMU targets ‘onDemand’. The prototype is able to assess the QEMU target queue depth and dynamically bring-up GCP (Google Cloud Platform) servers as QEMU target devices to which LAVA jobs are dispatched. When the queue is empty it is able to tear down the allocated instances. This hasn’t yet been upstreamed, but it’s likely to make its way into some technology prototypes in the near future. 
+ +### LAVA test plans + +The LAVA test plans project was created to combine test-definitions (see below) with LAVA device types and produce valid LAVA job templates. The task isn’t easy because LAVA job definitions tend to use implicit dependencies, for example some types of deployments only work for certain devices. At the same time the goal of LAVA test plans was to produce a valid LAVA job for every possible combination of LAVA device type and test-definitions test. Currently the project is used by the LKFT team but there are a number of proposals to use it in Linaro Developer Services projects. + +### Test-Definitions + +For several years the [test-definitions](https://github.com/Linaro/test-definitions) project has provided a good base for LAVA test encapsulation. This year it received one important improvement - documentation. The documentation is now auto generated and available in [readthedocs.io](https://test-definitions.readthedocs.io/en/latest/). This project is used quite heavily both inside and outside of Linaro (see [github fork metrics](https://github.com/Linaro/test-definitions/network/members)). This project provides a powerful ‘network’ effect for Linaro because it establishes Linaro as experts in automated testing. + +![LKFT logo](/linaro-website/images/blog/lkft) + +### LKFT 2.0 + +The idea behind LKFT 2.0 is that we could pivot our architecture to develop and make use of discrete and reusable components which improve our ability to scale, lead to developer-controlled LKFT pipelines, and reuse of components outside of the Linux Kernel Quality project directly. This is exactly what has happened. LKFT is now using TuxBuild as the Linux kernel build engine. We’re also making LKFT pipelines available to Linux kernel developers and we’re working on reporting concepts which are applicable everywhere. + +LKFT 2.0 now includes the following components: + +* Reusable Gitlab pipeline definitions +* TuxBuild +* TuxMake +* Lava-test-plans to generate LAVA test definitions in a robust and reusable way +* Squad +* Squad-client +* KIR +* TuxPub for hosting LKFT root filesystems +* LAVA +* Linaro lab hosted target devices +* Test-definitions for hosting the implementation details of running each test +* KISS Cache +* Openembedded layer meta-lkft + +Each of these components can and are often reused outside of LKFT for their specific purpose. Together, they provide all of the functionality needed to deliver LKFT. What’s really interesting about this list is that many of the innovations from this year have already been adopted in LKFT, but by no means is LKFT the only place these are usable. + +![tuxsuite logo](/linaro-website/images/blog/tuxsuite) + +## [TuxSuite](https://tuxsuite.com/) + +The term ‘TuxSuite’ might be new to many people since this ‘branding’ term was just recently decided upon as the name of the suite of tools we’re building around the cloud-scalable Linux kernel build and test capabilities. + +![tuxmake logo](/linaro-website/images/blog/tuxmake) + +### [TuxMake](https://gitlab.com/Linaro/tuxmake) + +TuxMake is an open source project that provides curated build environments and tools (in containers) that are necessary for building Linux kernels. It provides for kernel builds what git provides for kernel source, that is portable and reproducible builds. 
It’s a problem that much of the upstream Linux development community doesn’t even realize they have (collectively spending incredible amounts of time dealing with broken builds and frustration about lack of build reproducibility). TuxMake has delivered on its promises and is currently being fully integrated into TuxBuild. TuxMake has a chance to change Linux kernel development best-practices. We’re hoping for industry adoption of TuxMake in the future. + +![tuxpub logo](/linaro-website/images/blog/tuxpub) + +### TuxPub + +TuxPub is the “Serverless File-Server”. It’s a file server that does not require any actively running servers, with cloud-native scalability and availability and it costs very little to run and maintain since it’s “just software”. It features a simple, minimal design and 100% unit-test coverage. It solves the problem of how to provide a light-weight, content view of related files similar to an Apache directory listing and is applicable for any project that stores artifacts in AWS S3. TuxPub is already living up to the promise and being used in places in Linaro outside of the TuxSuite proper, such as in LKFT. + +![tuxboot logo](/linaro-website/images/blog/tuxboot) + +### TuxBoot Prototype + +TuxBoot is the sibling service to TuxBuild. TuxBoot was conceived with the grand vision of being able to boot any Linux kernel built by TuxBuild in emulation, in constant time. The first step in this vision was to execute a prototype. The prototype started with a very narrow mission--to prove that we could boot test 100 Linux kernels simultaneously “in the cloud” using ephemeral LAVA hosted QEMU instances. This required learning cloud-native ‘serverless’ methodologies so that there is no TuxBoot infrastructure running when there are no TuxBoot jobs being executed. We now understand AWS Amazon Machine Images (AMI), AWS queuing with SQS, Auto-scaling groups, AWS spot instance ‘on-demand’ virtual machine management, AWS APIs, and AWS Lambda. They used TuxBuild as a model for the API and serverless backend but had to solve major technological problems to get the ASG model working properly. + +![tuxbuild logo](/linaro-website/images/blog/tuxbuild)[TuxBuild](https://gitlab.com/Linaro/tuxbuild) has been impressively reliable over the last year, with zero downtime, attributable to the continuous deployment methodology of the development team as well as a testament to the power of the serverless methodologies when implemented according to “best-practices”. The Tux team has been making consistent improvements in the areas of scalability for the last year. Not only have they been executing disciplined weekly load tests which have helped them find scalability corner-cases (such as abnormalities in how AWS reaps spot-instances immediately after allocating them), but they’ve also been working on fundamental improvements to how we manage hitting foreign git-servers at scale. + +We learned a lot about how to crash git server hosts in the last year while scale-testing TuxBuild and we realized that in order to execute on our vision for TuxBuild we could not have TuxBuild hammering kernel.org, github.com, or gitlab.com with thousands of simultaneous fetch requests or we might get sternly worded emails from those service providers. As a remedy we developed a serverless git repo cache and mirror mechanism, proprietary to TuxBuild, that prevents TuxBuild from saturating external git servers. 
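As a rough illustration of the developer workflow TuxMake (described above) targets, the invocation below sketches how a kernel tree might be built inside one of the curated container environments. The runtime, toolchain and architecture values are illustrative assumptions and the exact flags may vary between TuxMake releases.

```
# Run from the root of a Linux kernel source tree: build an arm64 defconfig
# kernel using a curated container image (values shown are illustrative)
$ tuxmake --runtime docker --target-arch arm64 --toolchain gcc-10 \
    --kconfig defconfig
```

Because the toolchain and build environment come from a versioned container rather than the developer's host, two people running the same command should get the same result - which is exactly the reproducibility property described above.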
+ +## LDCG + +![Linaro Edge Networking Group icon](/linaro-website/images/blog/ledge) + +This year we have had a goal to move the Colocation datacentre facility from London to Cambridge. The Colo hosts the infrastructure for the Linaro Developer Cloud. This is a free service to enable any developer to gain access to Arm-based server-grade environments. + +Over the years the hardware at the Colo has been added to with products, sometimes early access before general release, from Qualcomm, HP, Marvell, Mellanox and Huawei. As we go to print with these highlights, the latest addition will be from Fujitsu. Moving forward, we have transitioned and retired older hardware whilst still being able to maintain our overall compute capacity. + +The relocation of facilities has also given us an opportunity to re-evaluate how services are delivered at the Colo. Until now, allocation of workloads has been siloed per team/function +to specific racks/chassis. To improve general availability of chassis across varying workloads and to optimise power consumption, we are in the process of consolidating chassis access and applying live migration of workloads across optimal numbers of chassis, and will power down spare chassis between workloads. + +LDCG covers more than the infrastructure. Each of our teams focuses on specific areas of Arm-based server requirements. We have also had a number of new recruits this year. + +In Server Architecture this year we welcomed Shashi Mallela, based in Canada. Shashi is helping to develop the Arm SystemReady SR subset for emulation, otherwise known as SBSA QEMU. On its own, QEMU provides an emulation environment for a range of architectures. The SBSA variant provides a whole-chassis emulation environment so that developers can try libraries that interact with whole systems rather than taking a pure CPU/Linux kernel focus. This can be used for testing ahead of hardware availability. SBSA QEMU forms the emulation environment for our SmartNIC research too. + +Our avid Cloud developers, Kevin Zhao & Xinliang, both based in China, have been identifying and coding to enable Arm-based support in the Ceph storage platform, which ended up also enhancing functionality on non-Arm-based architectures. Ceph is the storage backend that underpins the whole LDCG datacentre solution. Our virtual machine environment is managed within OpenStack, a community project which enables Live Migration services, which are key to the new datacentre layout. Notwithstanding that, the Cloud team have managed to enable Linaro to be recognised as a Kubernetes-certified test environment, which sits atop OpenStack/Ceph. + +![HPC Supercomputer image](/linaro-website/images/blog/hpc-supercomputer-image) + +Feeding into our new datacentre layout and supercomputer will be the need to handle Big Data. Our very own Ganesh Raju, based in the US, just celebrated the release of BigTop v1.5. Ganesh and his Arm Member engineers have been working hard to ensure the various components that make up BigTop all build correctly, and plugged the holes with new code as needed. BigTop will be the stack that houses our Machine Learning models that devour CPU cores for number crunching needs. + +For our AI component too, Arm has added two Member engineers to help with our ML Framework development. So, with the rest of LDCG’s assignee and Member engineers - Marcin Juszkiewicz, Masato Fukumori, Masahisa Kojima, Jun He, Guillaume Gardet, Nathan Sircombe, Crefeda Rodrigues, Yuqi Gu - LDCG is looking strong for 2021.
+ +This year we have seen the amalgamation of two workgroups, HPC-SIG and AI on Servers, to form the HPC SmartScale project (HPC-AI). It’s also where we’ve seen growth in welcoming new recruits Andrew Goodbody and Takis Mavrodakos. We’re also actively recruiting now for a third! All are based in Cambridge. Andrew and Takis have been providing the physical work of maneuvering the servers from our old London Colo to the Cambridge site, whilst remotely Kevin and Xinliang have been sending many requests to plug Cable 43 to Port 98 and Cable 23 to Switch 2. The list goes on. Well done all - it’s a mammoth task. When the move is over, I’m sure they’ll enjoy the sole focus of software development! + +HPC-AI aims to promote the use of Arm-based servers in high performance environments and enable intelligent decision making based on input streams. It’s an environment that wouldn’t be possible without all of the components that make up LDCG and the collaboration with TCWG in particular and the rest of Linaro. + +What? We can do more! We are just beginning the challenge to take on and support Neoverse in the Hyperscaler environment. This means cloud-native, disaggregated, heterogeneous, distributed computing that utilises AI/ML for smart-enablement of the backend for all those incoming API/RPC calls from the edge and IoT fields. Watch this space! + +To find out more about the work Linaro does, [get in touch](https://www.linaro.org/contact/)! diff --git a/src/content/blogs/linaro-engineering-highlights-for-august-2020.mdx b/src/content/blogs/linaro-engineering-highlights-for-august-2020.mdx new file mode 100644 index 0000000..ba7898c --- /dev/null +++ b/src/content/blogs/linaro-engineering-highlights-for-august-2020.mdx @@ -0,0 +1,133 @@ +--- +title: Linaro Engineering Highlights for August 2020 +description: This blog covers the many developments the teams at Linaro have + been working on during August including the initial TVM AI compiler + performance numbers on ARM64, using energy model to stay in TDP budget, + thermal notifications with Netlink, how the ARM32 Linux kernel decompresses + and the history of the Linux kernel and Linaro at one million commits. +date: 2020-09-14T04:19:30.000Z +image: linaro-website/images/blog/10-year-graphic-horizonal1 +tags: + - arm + - ai-ml + - linux-kernel + - testing +author: jon-burcham +related: [] + +--- + +## Initial TVM AI Compiler Performance Numbers on ARM64 + +**By Tom Gall, Engineering Director, AI/ML** + +![class=small-inline left Artificial Intelligence (AI) icon](/linaro-website/images/blog/ai) + +Within the Linaro AI Project, the TVM AI compiler is one of our main areas of focus. This compiler is able to produce optimized binaries for a variety of targets that include the universe of ARM processors, from Cortex-A (64-bit & 32-bit) to Cortex-M, as well as a variety of offload engines. Besides targets, one of TVM’s most important features is the ability to consume models from numerous strategic frameworks such as Tensorflow, Tensorflow Lite, ONNX, PyTorch, and MXNet, to name a few. + +An early question to answer in the lifetime of Linaro’s involvement with the project is: what is current performance like? What are expectations for improvements going forward? + +In the case of TVM, the framework with its layered architecture gives the Arm ecosystem the benefits of an AI compiler which performs its own optimization steps that are separate from those which are dependent on processor architecture. Gains can be achieved in both layers.
+ +How do we measure and compare TVM across the various AI frameworks? A reasonable way is to utilize reference models that are available for download, such as those from the Tensorflow project [tensorflow.org/lite/guide/hosted\_models](https://www.tensorflow.org/lite/guide/hosted_models), and then compare the performance of those models within the original project as well as with TVM. + +In this article we’ll compare image classification models, which represent one type of inference to perform. There are certainly other types, such as Natural Language Processing models, that will be added to the comparison in time. + +![relative inference performance on ARM64 chart](/linaro-website/images/blog/relative-inference-performance-on-arm64) + +What is shown is relative performance, based on the time taken to perform inference on a set of reference images. Multiple runs were used to obtain an average result and standard deviation. A shorter bar is better, while a longer bar is worse. A bar which is double the length means that the time to perform inference on a reference image took twice as long. All operations were performed on the CPU. It’s important to remember that not all SoCs have GPUs or other offload hardware available to them. + +All measurements in this article were using quantized models. + +While this gives you an early peek at performance trends, there are two very important data points to keep in mind. + +One of the major features of TVM is AutoTVM, a tuner which, through successive runs, utilizes feedback to further tune a model and improve performance. AutoTVM was not used in this analysis. We’ll explore what AutoTVM can do in future articles. + +On the TFLite side, the framework does not automatically take advantage of multiple cores. Using a multi-core setting is left as an exercise for the user. + +As a result, both frameworks have further performance improvements they can realize from their existing code bases. + +There is work ahead. This past month, the Arm Compute Library started to land within TVM, which will give TVM access to highly optimized subroutines for the Arm architecture. We’ll explore these and other Arm targeted improvements within TVM in future articles. + +The goal of the Linaro AI project is to enable superior inference performance within the Arm ecosystem. Coupled with the Linaro lab, with its now assembled range of Member devices, we have the capability to monitor performance as AI development occurs. We look forward to sharing future news in this area. + +## Using Energy Model To Stay In TDP Budget + +**By Daniel Lezcano, Senior Engineer, Kernel Working Group** + +**Introduction** + +![class=small-inline left Core Engineering icon](/linaro-website/images/blog/core-eng) + +Due to the increasing complexity of SoCs, we’re now seeing lots of thermal sensors on the die to quickly detect hot spots and allow the OS to take steps to mitigate these events - either through better scheduling, frequency throttling, idle injection or other similar techniques. + +The performance states of a device usually follow a quadratic curve in terms of SoC power consumption, which explains why they can have a very significant impact on the system. + +Power management is done from the kernel side with different frameworks: cpufreq, which automatically adapts the performance state via the operating points depending on the system load, and the thermal framework, which monitors component temperatures and caps performance when a hotspot is detected.
There are more techniques but, for the sake of simplicity, we won’t mention them in this blog. + +Continue with this article [here](https://www.linaro.org/blog/using-energy-model-to-stay-in-tdp-budget/). + +## Thermal Notifications With Netlink + +**By Daniel Lezcano, Senior Engineer, Kernel Working Group** + +**Introduction** + +![class=small-inline left Core Engineering icon](/linaro-website/images/blog/core-eng) + +**The thermal framework - a nice design** + +* The thermal zone is the abstraction where the hardware sensor implementation provides the backend driver to return the temperature via unified callbacks. +* The cooling device is the abstraction of the device in charge of reducing the temperature. It could be a passive cooling device, reducing the performance of the monitored device by, for example, changing the operating point of a CPU, or an active cooling device like a fan. The former does not need extra energy to cool down, while the latter does. +* The thermal governor is the logic which acts on the cooling device to mitigate the temperature. + +The way a thermal zone is monitored will depend on the sensor capabilities: + +* Some sensors can only give the temperature when requested; in this case the thermal zone temperature will be monitored by a periodic timer. That means an idle system will wake up to check the temperature even if there is nothing to do. +* Some more modern sensors can be programmed to send an interrupt when a specific threshold is reached. In this case, the system can stay fully idle; no wake up is necessary. Please note that the polling mode also introduces a latency in the temperature threshold detection; statistically speaking it is half of the timer period. For instance, for a one second polling time, the average latency for detection will be 500ms, a duration that is far too large for modern boards which can experience thermal variance at a rate of up to 0.5°C / ms. In this case, interrupt mode guarantees a synchronous action via the interrupt handler when a temperature threshold is reached. + +Continue with this article [here](https://www.linaro.org/blog/thermal-notifications-with-netlink/). + +## How the ARM32 Linux kernel decompresses + +**By Linus Walleij, Senior Engineer, Arm Assignee** + +![class=small-inline left Core Engineering icon](/linaro-website/images/blog/core-eng) + +ARM traditionally uses compressed kernels. This is done for two major reasons: + +* It saves space on the flash memory or other storage media holding the kernel, and memory is money. For example, for the Gemini platform that I work on, the uncompressed vmlinux kernel is 11.8 MB while the compressed zImage is a mere 4.8 MB, so we save more than 50%. +* It is faster to load because the time it takes for the decompression to run is shorter than the time that it takes to transfer an uncompressed image from the storage media, such as flash. For NAND flash controllers this can easily be the case. + +This is intended as a comprehensive rundown of how the Linux kernel self-decompresses on ARM 32-bit legacy systems. All machines under arch/arm/\* use this method if they are booted using a compressed kernel, and most of them are using compressed kernels. + +Continue with this article [here](https://people.kernel.org/linusw/how-the-arm32-linux-kernel-decompresses).
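For readers who want to see the space saving on their own trees, the two image sizes quoted above can be compared directly after a 32-bit Arm kernel build; the command below simply lists both artifacts and is only an illustration (the paths assume the build has completed in the source tree).

```
# Compare the uncompressed kernel with the compressed boot image
# (on the Gemini platform mentioned above this is roughly 11.8 MB vs 4.8 MB)
$ ls -lh vmlinux arch/arm/boot/zImage
```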
+ +## The history of the Linux kernel and Linaro at one million commits + +**By Mike Holmes, Engineering Director, Foundational Technologies** + +### One Million Commits + +![class=small-inline left Linux Kernel Penguin icon](/linaro-website/images/blog/linux-kernel-security)Earlier this year Linaro celebrated [10 years](https://www.linaro.org/blog/linaro-a-decade-of-development/) of collaboration in the Arm ecosystem with a blog post by David Rusling. Now, at the end of the summer of 2020, the Linux kernel, which has been a large part of that collaboration, has also celebrated its 1 millionth commit after 29 years of effort, with a write up in ZDNet. \[1] + +That means that Linaro has been contributing to the Linux kernel for about a third of the kernel’s existence and in that time it has had a tremendous impact, driven initially by efforts to address fragmentation, and later to add or enhance capabilities that expose Arm SoC strengths, a task which continues to this day. + +In the most recent 5.8 kernel, we find that over half the code was written in the last seven years \[2]. Ranking contributions between 2007 and 2019 - a period which includes the three years before Linaro’s inception - we still find that Linaro is ranked as the 5th largest organisation contributor! \[2] + +**To put this in perspective, over half of the kernel was written by organisations or consultants in the last seven years, and Linaro is right in the thick of it.** + +#### It's not just the commits any longer + +The release model has evolved with much greater emphasis on automated tooling since 2010. Looking at the history of automated testing of the kernel, we see that the first recorded entrant into the game was Coccicheck in 2010 when Linaro started, followed by 0-day in 2012 and many more entrants since then. This year's Linux Foundation Kernel history report lists one of the latest entrants to the automated test regime, Linaro's Linux Kernel Functional Testing (LKFT), a much more recent testing effort from Linaro, alongside KernelCI, Buildbot etc. + +With the considerable effort being put into LKFT and its supporting Tux suite of build and regression tools, it is quite possible that Linaro will rank in the top five contributors to kernel testing as well as the top five contributors to the kernel itself. Currently the top six contributing tools \[1], in order of bugs reported as tracked by the tag “Reported-by”, are: + +1. Hulk Robot +2. Syzbot +3. 0-day +4. Coccicheck +5. Kernel CI +6. Coverity + +Ref: \[1] Author: Steven J. Vaughan-Nichols for Linux and Open Source. diff --git a/src/content/blogs/linaro-engineering-highlights-may-2020.mdx b/src/content/blogs/linaro-engineering-highlights-may-2020.mdx new file mode 100644 index 0000000..8e42023 --- /dev/null +++ b/src/content/blogs/linaro-engineering-highlights-may-2020.mdx @@ -0,0 +1,329 @@ +--- +title: "Linaro Engineering Highlights: May 2020" +description: > + This article looks into the latest news & developments that Linaro has been + working on during May 2020. Read more here. +date: 2020-06-05T11:58:17.000Z +image: linaro-website/images/blog/open_source_keyboard_under_2mbjpg +tags: + - iot-embedded + - ai-ml +author: jon-burcham +related: [] + +--- + +### **Linaro AI Project: uTVM** + +By Tom Gall, Director, AI/ML Project Lead + +![class=small-inline left AI icon](/linaro-website/images/blog/ai) + +TVM is an AI compiler for inferencing which can create highly optimized binaries for deploying on ARM systems.
Micro TVM, or uTVM, is an effort to apply the advanced technology in the TVM compiler infrastructure to microcontroller devices. + +From the Members meeting in January you might remember the exercise to determine what activities would bring value to Linaro members involving AI on microcontrollers. The recommendation delivered to the LITE-SC was to approve a uTVM project as part of the AI efforts within Linaro. The LITE-SC vote is in progress as this is being written. + +If any individual member would like a briefing, we are happy to provide one. + +A product level specification, internal to Members and Linaro only, has been created which documents the various modifications / goals that need to be completed in order to evolve uTVM from its current PoC/Alpha state to a mature piece of software which can be utilized within Member products. The creation of this document is a group effort by those engaged in the project. The document will serve as our roadmap to success. + +Engineering related to the project has already begun: + +[Arm Ethos-N integration RFC](https://discuss.tvm.ai/t/rfc-ethosn-arm-ethos-n-integration/6680) + +[First microTVM testcase (Merged)](https://github.com/tom-gall/tvm/commit/30e3ce99a7dc7aef9c388e0ebc05018b4c4ba721) + +The project is open to club/core Members to join. If a Member is not a club or core Member or part of the LITE-SC, they may also join by either joining LITE or by becoming a project member. + +Email tom.gall@linaro.org for details or questions. + +### **Firmware Framework for Arm (FFA) Specification [(1.0 EAC release)](https://developer.arm.com/docs/den0077/a)** + +By Mike Holmes, Director, Foundational Technologies + +![class=small-inline left Core Engineering icon](/linaro-website/images/blog/core-eng) + +Arm and Linaro have been collaborating on prototypes with changes in the OP-TEE kernel driver, OP-TEE OS and Trusted Firmware based on the different versions of the FFA (formerly SPCI) specification. Having the OP-TEE regression suite xtest pass has improved confidence in the different versions of the specifications. Later versions of the prototypes have also included a secure world (S-EL2) hypervisor based on Hafnium. Linaro created the first prototype and after that it has been a shared effort. + +### **KissCache: A New Caching Server** + +By Ryan Arnold, Director, System Technologies + +![class=small-inline left LKFT icon](/linaro-website/images/blog/lkft) + +[KissCache, the "simple and stupid caching server"](/blog/the-kisscache-caching-server/), is a newly released open source project from Linaro that is now used in production by the Linux Kernel Functional Test (LKFT) project. KissCache is used to cache and serve binary artifacts to Linaro’s LKFT LAVA instance. These artifacts are held in Amazon S3. Using KissCache both saves Linaro money by caching artifacts in the Linaro lab (reducing bandwidth usage from S3) and reduces job execution time, because artifacts are served much more quickly and therefore systems are provisioned more quickly. + +Unlike classical proxies like Squid that transparently intercept traffic, in order to use KissCache one must explicitly prefix the requested URL with the URL of the local KissCache instance. KissCache will download the requested resource in the background while streaming it to the client. KissCache’s primary use case is downloading and caching https (secure) content. It preserves the chain of trust, whereas Squid really only works properly with non-secure content.
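As a concrete sketch of that URL-prefixing scheme, the request below shows how a client might fetch an artifact through a local instance. The instance host name and the exact fetch endpoint are assumptions made for illustration, not a definitive description of the KissCache API.

```
# Direct download of an artifact (no caching)
$ curl -O https://example.com/artifacts/rootfs.ext4.gz

# The same artifact requested through a hypothetical local KissCache instance,
# which downloads it once, caches it, and streams it to the client
$ curl -o rootfs.ext4.gz \
    "https://kisscache.example.com/api/v1/fetch/?url=https://example.com/artifacts/rootfs.ext4.gz"
```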
+ +If many clients are requesting the same resource, KissCache will download it only once and stream the content to every client. In the last month, Linaro’s KissCache deployment handled more than 160k requests, serving 32TB of data while only downloading 1TB from outside of the Linaro lab. This has a real cost savings of over $2000 per month. + +### **Tuxpub - The Serverless Cloud-Based Artifact Server** + +By Ryan Arnold, Director, System Technologies + +![class=small-inline left LKFT icon](/linaro-website/images/blog/lkft) + +At Linaro, we have often hosted artifacts from Amazon S3 using a custom tool known as Linaro License Protection (LLP). LLP started life serving files from local disk storage, then later moved to use Amazon S3. Technically LLP provides an S3 browsing interface. However it was never designed to run under a serverless architecture. This coupled with other necessary Linaro/License features (such as authentication) means that LLP doesn’t fit a “simple serverless” model. + +Linaro is presently working on a SaaS offering called [TuxBuild](https://gitlab.com/Linaro/tuxsuite) (and companion service called TuxBoot). These technologies are implemented using the new serverless model and have a need to provide artifacts from cloud storage using a lightweight application that provides a file browser as a web-based user front end. + +The original implementation of Tuxpub used Javascript, but we quickly realised it wasn’t scalable, it wasn’t conformant with what web tools expect, and it lacked features which our users were demanding (such as the ability to pull the file contents in JSON) for browsing programmatically. After searching for existing solutions we discovered that there were no available light-weight tools to solve our problems! + +We built a wishlist of the following features and requirements that we felt a proper file server would honour and set about building tuxpub: + +* Serverless methodology for easy deployment and management +* Ability to block the index page so people cannot browse other folders +* Allow users to access a JSON output of the page for easy downloading + +The following is a sample file browser front-end being served by tuxpub for the TuxBuild project: + +![sample file](/linaro-website/images/blog/tuxpub_lrg) + +###### **How easy is it to deploy and manage?** + +Linaro can deploy our tuxpub instances with two lines of code and a config file! This procedure is documented in the TuxPub [readme](https://gitlab.com/Linaro/tuxpub#run-with-zappa). To bring up a TuxPub instance a developer only needs to create an application shim with the following zappa code: + +``` + "dev": { + "app_function": "zappa_init.app", + "aws_region": "us-east-1", + "project_name": "lkft-tuxpub", + "runtime": "python3.7", + "s3_bucket": "zappa-tuxpub", + "environment_variables": { + "S3_BUCKET": "storage.dev.lkft.org", + "S3_REGION": "us-east-1", + "ROOT_INDEX_LISTING": "True", + } + } +``` + +With these files a developer needs to build up a [pipenv](https://realpython.com/pipenv-guide/) file with `“pipenv install --deploy”`, and then deploy it into AWS Lambda with `“zappa deploy dev”`. + +One can even run the application locally with `S3_BUCKET=storage.dev.lkft.org S3_REGION=us-east-1 ROOT_INDEX_LISTING=True FLASK_APP=tuxpub flask run`. + +##### **What are the limitations?** + +Since tuxpub uses the AWS API, there are limitations set by the cloud provider. An index page with more than 1000 objects hits an API limit and generates a nasty error page. 
Because of this, we intend to implement ‘paging’ support. Tuxpub does not presently support user authentication and has no immediate plans to add it. + +##### **Can others use and contribute to tuxpub?** + +Linaro has made tuxpub available as open source software under the [MIT license](https://gitlab.com/Linaro/tuxpub/-/blob/master/LICENSE). This means that it’s free to deploy and modify. We’re very welcoming of pull requests! You can find the code [here](https://gitlab.com/Linaro/tuxpub). + +##### **What is the future of tuxpub?** + +Linaro’s objective is to keep this application simple! We are being selective and do not want to add too many features that would bloat the application. Desirable features additions (most notably paging support) are being collected in [tuxpub gitlab issues](https://gitlab.com/Linaro/tuxpub/-/issues) and addressed over time. + +### **RDK and i.MX8** + +By Tom Gall, Director, Linaro Consumer Group + +![class=small-inline left Multimedia icon](/linaro-website/images/blog/multimedia) + +The RDK 3.0 port to iMX8M reached a new milestone where https://rdkcentral.com/ now contains detailed information on how to [build](https://wiki.rdkcentral.com/display/RDK/Build+Procedure+for+64bit+RDK+Media+Client+using+Thud+Yocto+2.6) and also [run](https://wiki.rdkcentral.com/display/RDK/Run+RDK+3.0+Features+on+i.MX8MQ) the RDK 3.0 on MCIMX8M-EVK NXP board. In addition, work has already progressed rapidly on the migration to Yocto Dunfell LTS release which is documented [here](https://wiki.rdkcentral.com/display/RDK/Yocto+3.1+LTS+build+procedure+for+RDK-V+on+i.MX8MQ). The i.MX8M SoC has become the Linaro reference SoC for secure video path developments for the major ecosystems Linaro is involved with for secure video (RDK, Linux & AOSP) where a fully secure video pipeline is required. + +Features showcased in the i.MX8M RDK port include the App Manager https://www.sparkui.org/ framework. This is a cross platform application engine that allows STB applications to be written in JavaScript but access the native rendering functionality of the underlying platform. The other main showcased feature is the Thunder application framework (aka, WPEFramework) and the integration of DRM technologies from Linaro into the wpewebkit browser to facilitate the playback of protected content. Linaro has upstreamed many patches to [meta-wpe](https://github.com/WebPlatformForEmbedded/meta-wpe), [Thunder](https://github.com/rdkcentral/Thunder), [ThunderNanoServices](https://github.com/rdkcentral/ThunderNanoServices), [WPEWebKit](https://github.com/WebPlatformForEmbedded/WPEWebKit) and the ocdm-\* plugins as part of this project. It has been an example of the productive collaboration that can happen inside Linaro between Comcast, NXP and Linaro engineers. + +### **Raspberry Pi Libcamera Initiative** + +By Tom Gall, Director, Linaro Consumer Group + +![class=small-inline left Raspberry Pi Libcamera icon](/linaro-website/images/blog/pi-lib-camera) + +The [libcamera](http://libcamera.org/) project reached a big new milestone with the joint announcement from RPi foundation and libcamera projects on the first fully open source camera stack including 3A (auto exposure, auto gain control, auto white balance) algorithms. This is the first SoC in libcamera to become fully enabled in terms of 3A, and as far as we are aware the first time 3A algorithms have been fully open sourced in any meaningful way. 
More about the announcement can be found [here](https://www.raspberrypi.org/blog/an-open-source-camera-stack-for-raspberry-pi-using-libcamera/). + +### **LEDGE Reference Platform Stage 3 available** + +By Francois Ozog, Director, Linaro Edge and Fog Computing + +![class=small-inline left Ledge icon](/linaro-website/images/blog/ledge) + +The LEDGE Reference Platform builds on the Generic Kernel Image concept pioneered by Google for Android. The ultimate goal is that Linaro members and Linux distribution providers (commercial or not) can deliver a single binary image that can be booted on any Embedded Base Boot Requirement (EBBR) compliant platform. The LEDGE Reference Platform builds on the efforts of the Dependable Boot project which focuses on building an EBBR compliant firmware environment. + +In more technical terms, the LEDGE Reference Platform is a lightweight, highly secure and robust container runtime environment that has dependable boot and update capabilities. It comes with a full set of security policies, with SELinux and the Integrity Measurement Architecture enabled, and other technologies that can be further adapted to specific markets. + +There are actually three images built: 64-bit, 32-bit, and 32-bit with LPAE enabled. With Stage 3, the LEDGE Reference Platform can be booted with UEFI SecureBoot (U-Boot or EDK2) and it was verified that: + +* A single 64-bit image can be booted on QEMU, NXP LS21060ARDB and Socionext Synquacer (and we expect to achieve this with kernel 5.8 on TI AM65XX) +* A single 32-bit image can be booted on TI AM572x and BEAGLEBOARD-X15, ST STM32MP157C-DK2 and QEMU + +Reference Platform Stage 4 has started. It will come with integrated, standard and generalized firmware update features based on UEFI update capsules. We shall build on systemd-defined boot-blessing capabilities to provide a robust boot orchestration scheme that will leverage hardware anti-bricking and anti-rollback features. + +### **Linaro Tech Days Sessions from LITE team** + +By Vicky Janicki, Linaro IoT and Embedded Group + +![class=small-inline left Lite icon](/linaro-website/images/blog/lite) + +The Linaro Embedded and IoT Team ran a three-session series in April and May under the Linaro Tech Days banner. Vincent Wan (TI) reviewed “[Power Management On Zephyr](https://resources.linaro.org/en/resource/GB6kdEf7QzYjYeSY2i7nGh)” to start. For the second session, Paul Sokolovskyy (Linaro) presented “[Update on LAVA Testing for Bare Metal Systems](https://resources.linaro.org/en/resource/2QZohk62hyRPhwVhHVbDRo)”, which charted LITE’s effort to enhance LAVA to work more effectively with MCUs. Manivannan Sadhasivam, a kernel engineer from Linaro Developer Services, presented “[LoRa Meets Zephyr](https://resources.linaro.org/en/resource/CwG9Dco35WHyrdqRH7fWQM)”, which included a brief history of LoRa, what is currently working and what remains. + +### **Trusted-Firmware-M Integration with Zephyr 2.3** + +By Kevin Townsend, Senior Engineer, Linaro IoT and Embedded Group + +![class=small-inline left Lite icon](/linaro-website/images/blog/lite) + +The upcoming 2.3 release of Zephyr now features out-of-the-box support for Trusted-Firmware-M, including hardware emulation via QEMU, meaning you don’t require physical access to a supported development board. + +Zephyr is configured to run in the non-secure processing environment, and TF-M is used in the secure processing environment, with communication between the two environments happening over TF-M’s IPC mechanism.
Both secure and non-secure images are signed, and validated by the secure BL2 bootloader at startup. Zephyr applications can make direct use of the PSA APIs for Cryptography, Initial Attestation, etc., and the IPC mechanism will be handled transparently from an application point of view. + +##### **General Requirements** + +This post details some of the steps required to test TF-M integration in Zephyr using QEMU, with only minor changes required to run the samples on actual hardware. + +**NOTE**: Zephyr currently supports TF-M integration with the MPS2 AN521 and Musca B1 board targets, with LPCXpresso55S69 support planned in the near future now that LPC55S69 support is available upstream in TF-M. The AN521 build target has been set up in Zephyr to optionally work with QEMU. + +Zephyr 2.3 RC1 was used when writing this, but 2.3 may be finalised by the time you read this. The instructions found here should remain consistent with anything from 2.3 RC1 and higher. + +At present, the TF-M integration has been tested on the following platforms, and will not work on Windows out of the box: + +* Ubuntu 18.04 using Zephyr SDK 0.11.3 +* macOS Mojave using QEMU 4.2.0 with gcc-arm-none-eabi-7-2018-q2-update + +##### **Zephyr Setup** + +Follow Zephyr’s Getting Started Guide available [here](https://docs.zephyrproject.org/latest/getting_started/index.html). + +##### **TF-M Setup** + +TF-M has a few additional requirements to enable building the secure-side firmware image. The following Python packages must be available on your system, since they are used by TF-M when signing binary images for secure bootloader verification at startup: + +``` +$ pip3 install --user cryptography pyasn1 pyyaml "cbor>=1.0.0" +``` + +Additionally, the **srec\_cat** utility is required when merging signed application images at the end of the build process. This can be installed via the **srecord** package, available on both Linux(-y) distributions and OS X, via some variation of: + +``` +$ sudo apt-get install srecord +``` + +Or in the case of OS X: + +``` +$ brew install srecord +``` + +##### **QEMU Setup** + +If you are using the Zephyr SDK on Linux, QEMU 4.2.0 is already included in the SDK and no further action is required. + +If you are using OS X, however, you will also need to install QEMU and make it available on your system path. QEMU 4.0.0 included support for the AN521 target, but we recommend using QEMU 4.2.0 or higher, which is the release used in the Zephyr SDK and during the TF-M/Zephyr integration work. This is generally as easy as running: + +``` +$ brew install qemu +``` + +You can test the installation and path access with the following command: + +``` +$ qemu-system-arm --version +QEMU emulator version 4.2.0 +Copyright (c) 2003-2019 Fabrice Bellard and the QEMU Project developers +``` + +##### **Building a TF-M application** + +At this point we can build a test application in Zephyr via the following command sequence: + +``` +$ source zephyr/zephyr.sh +$ west build -p -b mps2_an521_nonsecure zephyr/samples/tfm_integration/psa_level_1/ -t run +``` + +This will cause TF-M to be built in the background, and the S image (from TF-M) and the NS image (from Zephyr) will both be signed with a signature that the BL2 secure bootloader will accept, and an optional binary for QEMU will be generated in addition to the standard AN521 binary images.
The **-t run** flag will cause QEMU to execute the specially prepared binary once the build process is complete, and you should see some variation of the following output: + +``` +[INF] Starting bootloader +[INF] Image 0: version=0.0.0+1, magic= good, image_ok=0x3 +[INF] Image 1: No valid image +[INF] Booting image from the primary slot +[INF] Bootloader chainload address offset: 0x80000 +[INF] Jumping to the first image slot +[Sec Thread] Secure image initializing! +TF-M isolation level is: 1 +Booting TFM v1.0 +*** Booting Zephyr OS build v2.3.0-rc1 *** +[00:00:00.003,000] app: app_cfg: Creating new config file with UID 0x155cfda7a +[00:00:03.516,000] app: att: System IAT size is: 453 bytes. +[00:00:03.516,000] app: att: Requesting IAT with 64 byte challenge. +[00:00:06.922,000] app: att: IAT data received: 453 bytes. + + 0 1 2 3 4 5 6 7 8 9 A B C D E F +00000000 D2 84 43 A1 01 26 A0 59 01 79 AA 3A 00 01 24 FF ..C..&.Y.y.:..$. +00000010 58 40 00 11 22 33 44 55 66 77 88 99 AA BB CC DD X@.."3DUfw...... +00000020 EE FF 00 11 22 33 44 55 66 77 88 99 AA BB CC DD ...."3DUfw...... +00000030 EE FF 00 11 22 33 44 55 66 77 88 99 AA BB CC DD ...."3DUfw...... +00000040 EE FF 00 11 22 33 44 55 66 77 88 99 AA BB CC DD ...."3DUfw...... +00000050 EE FF 3A 00 01 24 FB 58 20 A0 A1 A2 A3 A4 A5 A6 ..:..$.X ....... +00000060 A7 A8 A9 AA AB AC AD AE AF B0 B1 B2 B3 B4 B5 B6 ................ +00000070 B7 B8 B9 BA BB BC BD BE BF 3A 00 01 25 00 58 21 .........:..%.X! +00000080 01 FA 58 75 5F 65 86 27 CE 54 60 F2 9B 75 29 67 ..Xu_e.'.T`..u)g +00000090 13 24 8C AE 7A D9 E2 98 4B 90 28 0E FC BC B5 02 .$..z...K.(..... +000000A0 48 3A 00 01 24 FA 58 20 AA AA AA AA AA AA AA AA H:..$.X ........ +000000B0 BB BB BB BB BB BB BB BB CC CC CC CC CC CC CC CC ................ +000000C0 DD DD DD DD DD DD DD DD 3A 00 01 24 F8 20 3A 00 ........:..$. :. +000000D0 01 24 F9 19 30 00 3A 00 01 24 FD 81 A5 01 63 53 .$..0.:..$....cS +000000E0 50 45 04 65 30 2E 30 2E 30 05 58 20 BF E6 D8 6F PE.e0.0.0.X ...o +000000F0 88 26 F4 FF 97 FB 96 C4 E6 FB C4 99 3E 46 19 FC .&..........>F.. +00000100 56 5D A2 6A DF 34 C3 29 48 9A DC 38 06 66 53 48 V].j.4.)H..8.fSH +00000110 41 32 35 36 02 58 20 C9 16 54 69 9C 13 DA 27 43 A256.X ..Ti...'C +00000120 5C D1 28 80 3D B6 B3 50 0A BC 70 87 39 97 BF 5E \.(.=..P..p.9..^ +00000130 9A 58 53 7E 24 4D F1 3A 00 01 25 01 77 77 77 77 .XS~$M.:..%.wwww +00000140 2E 74 72 75 73 74 65 64 66 69 72 6D 77 61 72 65 .trustedfirmware +00000150 2E 6F 72 67 3A 00 01 24 F7 71 50 53 41 5F 49 4F .org:..$.qPSA_IO +00000160 54 5F 50 52 4F 46 49 4C 45 5F 31 3A 00 01 24 FC T_PROFILE_1:..$. +00000170 72 30 36 30 34 35 36 35 32 37 32 38 32 39 31 30 r060456527282910 +00000180 30 31 30 58 40 51 33 D9 87 96 A9 91 55 18 9E BF 010X@Q3.....U... +00000190 14 7A E1 76 F5 0F A6 3C 7B F2 3A 1B 59 24 5B 2E .z.v...<{.:.Y$[. +000001A0 67 A8 F8 AB 12 6E 7F 97 FB 28 35 97 89 A5 56 61 g....n...(5...Va +000001B0 8F 00 4E A7 D1 37 5B E5 C1 6A 30 3C F2 00 97 17 ..N..7[..j0<.... +000001C0 04 0F 91 74 DA ...t. +[00:00:06.964,000] app: Persisting SECP256R1 key as #1 +[00:00:09.402,000] app: Retrieving public key for key #1 + 0 1 2 3 4 5 6 7 8 9 A B C D E F +00000000 04 47 EA AE D9 D6 6D 2E 1D 65 05 F5 04 FE CC 21 .G....m..e.....! +00000010 99 BE 5E 5A 56 6B 4F 1E 0C 43 E2 5B CE 1B 7D 06 ..^ZVkO..C.[..}. +00000020 D7 B3 71 E2 0A 3C 47 ED 84 9F 65 0E DB F9 3D D2 ..q.. 
app: Calculating SHA-256 hash of value + 0 1 2 3 4 5 6 7 8 9 A B C D E F +00000000 50 6C 65 61 73 65 20 68 61 73 68 20 61 6E 64 20 Please hash and +00000010 73 69 67 6E 20 74 68 69 73 20 6D 65 73 73 61 67 sign this messag +00000020 65 2E e. + 0 1 2 3 4 5 6 7 8 9 A B C D E F +00000000 9D 08 E3 E6 DB 1C 12 39 C0 9B 9A 83 84 83 72 7A .......9......rz +00000010 EA 96 9E 1D 13 72 1E 4D 35 75 CC D4 C8 01 41 9C .....r.M5u....A. +[00:00:11.853,000] app: Signing SHA-256 hash + 0 1 2 3 4 5 6 7 8 9 A B C D E F +00000000 81 FC CE C2 02 96 79 E0 60 A8 0C 53 22 58 F3 17 ......y.`..S"X.. +00000010 7A AC 46 60 7E 30 7F 60 03 53 1C 43 CA 31 97 B8 z.F`~0.`.S.C.1.. +00000020 47 47 56 E9 19 45 F9 E2 DC 38 68 8D F1 A7 C7 48 GGV..E...8h....H +00000030 96 26 F6 0C 0F 94 D8 E3 9E 66 82 76 A6 BC B4 FC .&.......f.v.... +[00:00:15.201,000] app: Verifying signature for SHA-256 hash +[00:00:20.987,000] app: Signature verified. +[00:00:23.441,000] app: Destroyed persistent key #1 +``` + +### Extending the TF-M Sample Applications + +The sample application above can easily be extended, or a new application can be started based on one of the samples available [here](https://github.com/zephyrproject-rtos/zephyr/tree/master/samples/tfm_integration). + +Consult the PSA API documentation or TF-M source code, linked below, for details on how to extend the samples: + +### **Key References** + +The following links are useful to further develop custom applications based on Zephyr 2.3+ and TF-M: + +* PSA API Documentation: Click [here](https://developer.arm.com/architectures/security-architectures/platform-security-architecture/documentation) +* TF-M Source Code:[](https://git.trustedfirmware.org/trusted-firmware-m.git/tree/) Click [here](https://git.trustedfirmware.org/TF-M/trusted-firmware-m.git/tree/). +* Zephyr’s fork of TF-M for any pull requests or bug reports: Click [here](https://github.com/zephyrproject-rtos/trusted-firmware-m).[](https://github.com/zephyrproject-rtos/trusted-firmware-m) + +### **Community News** + +By Mike Holmes, Foundational Technologies + +![class=small-inline left Stewardship icon](/linaro-website/images/blog/stewardship) + +The [Linux Test Project test suite stable release for "May 2020"](https://lwn.net/Articles/820636/) has been released with notable contributions from Linaro. Viresh Kumar (Kernel Working Group) was featured in the top three for his effort to provide complete coverage of the Syscalls in the LTP suite. diff --git a/src/content/blogs/linaro-featured-top-in-5-19-linux-kernel-release-the-first-kernel-to-be-released-on-arm64.mdx b/src/content/blogs/linaro-featured-top-in-5-19-linux-kernel-release-the-first-kernel-to-be-released-on-arm64.mdx new file mode 100644 index 0000000..640a158 --- /dev/null +++ b/src/content/blogs/linaro-featured-top-in-5-19-linux-kernel-release-the-first-kernel-to-be-released-on-arm64.mdx @@ -0,0 +1,87 @@ +--- +title: "Linaro featured top in 5.19 Linux Kernel Release - the first kernel to + be released on Arm64 " +description: "In this blog we look at Linaro's contributions to the 5.19 Linux + Kernel Release. 
" +date: 2022-08-16T08:48:30.000Z +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +tags: + - linux-kernel + - arm +author: linaro +related: [] + +--- + +The 5.19 kernel was released at the end of July and saw Linaro featured in the top three in terms of most active employers (according to [LWN's monthly development stats](https://lwn.net/Articles/902854/#:~:text=The%205.19%20kernel%20was%20released,Retbleed%20mitigations%2C%20on%20July%2031.)). + +![List of most active employers in the 5.19 Linux Kernel Release](/linaro-website/images/blog/5.19-most-active-employers) + +While this is impressive given the size of Linaro, there is something even more exciting about this release which is worth celebrating. + +For the first time ever, Linus Torvalds [released a new kernel](https://lore.kernel.org/lkml/CAHk-=wgrz5BBk=rCz7W28Fj_o02s0Xi0OEQ3H1uQgOdFvHgx0w@mail.gmail.com/T/#u) on an Arm64 machine. For a long time many, including Linus himself, have felt developing on Arm just wasn’t an option - the hardware just wasn’t quite there. Until now. + +So why are we excited about this? Linaro was formed to consolidate the Arm code base back in 2010 which, at the time, was quite fragmented. Through important development and maintenance efforts on the building blocks which support Arm devices in the kernel, Linaro has contributed to making this achievement possible. + +It is fair to say that without all the work Linaro has done to enhance and improve the Arm software ecosystem as a whole, the Arm development platform would not be what it is today. + +# Linaro Statistics from the 5.19 Linux Kernel Release + +Now back to the stats… Quite a few of our Kernel Engineers were featured as top contributors to the 5.19 Linux Kernel release. We asked them to talk about the work they did which helped place Linaro third in the list for the most active employers. + +![List of most active developers in the 5.19 Linux Kernel Release](/linaro-website/images/blog/5.19-kernel-stats) + +## Krzysztof Kozlowski + +Krzysztof converted several Devicetree bindings to DT schema and improved or fixed many others. Along with improvements in DTS files for Qualcomm and Samsung ARM/ARM64 platforms, this gave him 1st place in v5.19 active contributors by number of changesets. Krzysztof also worked on various small fixes around memory controllers, interconnects and a few other drivers. + +A significant portion of Krzysztof’s time is now being spent reviewing Devicetree bindings, as the DT bindings maintainer. With 144 reviewed tags (and 110 Acked-by for smaller DT patches) this effort put him in fifth place among top reviewers of v5.19 kernel. + +![List of most active reviewers in the 5.19 Linux Kernel Release](/linaro-website/images/blog/5.19-review-credits) + +## Arnd Bergmann + +Arnd completed the work to bring all Armv4T and Arm5 platforms into a single multiplatform kernel configuration, the same way that one can build a kernel for all Arm8, all Arm6 and Armv7, or all x86-64 machines. This concludes work that has been ongoing [since 2012](https://lore.kernel.org/linux-arm-kernel/1349135827-24790-13-git-send-email-olof@lixom.net/). While the newer platforms were already converted within a short period of time, and the last ARMv6 and ARMv7 based platforms were all done [in 2016](https://lore.kernel.org/linux-arm-kernel/1453338882-31300-4-git-send-email-olof@lixom.net/), the older platforms generally have fewer users and developers, so this took longer. 
The last platforms to get converted now are TI OMAP1, Intel/Marvell PXA, Intel IXP4xx, Intel IOP32x, Cirrus Logic EP93xx and Samsung S3C24xx. The only exclusions are the even older Intel StrongARM (ARMv4) based machines that still require a custom kernel build: + +[ARMv4T/v5 multiplatform support for v5.19, part 1](https://lore.kernel.org/linux-arm-kernel/CAK8P3a3gqQbZG5gdh_cRmGx8B6XR8CGYcXN7wMu-YmCBwD1wGQ@mail.gmail.com/) +[ARM: multiplatform changes, part 2](https://lore.kernel.org/linux-arm-kernel/CAK8P3a13uAiBJkqD9UMmnfFn3AAY2ZqQisVQdovRy5dKiyJaXQ@mail.gmail.com/) + +This work already enabled a number of cleanups of the 32-bit Arm codebase and led to the next step, which is the deprecation of the majority of the last 196 machine definitions that have not yet been converted to device tree. A patch series to mark all machines with no known users as deprecated has been merged for linux-6.0 and the machines are planned to finally be removed in [early 2023](https://lore.kernel.org/linux-arm-kernel/CAK8P3a0ht1tG2nVzh1Shm0v8orQTa0VWOVkhvX9daF4yu6u8Sg@mail.gmail.com/). + +In addition, Arnd merged well over 1000 patches for modern Arm based platforms from other developers through [the SoC tree in linux-5.19](https://lore.kernel.org/linux-arm-kernel/CAK8P3a1K_t-a4=uKPbZ2kwa13bDhkNC9S8ZiyhF84SSXJYjT2w@mail.gmail.com/) and another set of changes in the asm-generic tree, which included the addition of the new loongarch64 architecture as well as the removal of the old Renesas H8300 architecture. + +## Dmitry Baryshkov + +Dmitry continued his work on maintaining the Qualcomm display drivers, by unifying common parts of the MDP5 and DPU code bases, removing the legacy eDP driver support, and improving the use of drm\_bridges throughout the drivers. In addition to this, Dmitry's work on adding support for the PCIe root-complex for Qualcomm Snapdragon 8 Gen 1 (e.g. SM8450) was accepted. + +## Bjorn Andersson + +Bjorn continued the upstreaming of support for the Qualcomm Snapdragon 8cx Gen3 compute platform, with contributions in the form of clock drivers, interconnect providers, power-domains and remoteproc support. His work on the Qualcomm Light Pulse Generator PWM and LED driver was at last accepted, after a very long development process. Progress was also made on the journey to get external display functional on Qualcomm platforms, through improvements necessary in the USB Type-C mux and orientation switch handling code. + +On the maintainer side, the bulk of the contributions came from a continuously active Qualcomm community, which resulted in Bjorn accepting patches touching upon 84 different boards, across 31 different Qualcomm platforms. In addition to work extending existing board support and adding new boards, a significant portion of these patches was improvements towards enabling DeviceTree binding validation. + +## Manivannan Sadhasivam + +Manivannan upstreamed the [Modem Host Interface (MHI) bus stack for the PCIe Endpoint devices](https://www.linaro.org/blog/mhi-bus-for-endpoint-devices-upstreamed-to-linux-kernel/), that gave him the #21st spot in the most active developers by changed lines for v5.19 release. Earlier, Manivannan upstreamed the [MHI bus stack for the PCIe host devices](https://www.linaro.org/blog/mhi-bus-support-gets-added-to-the-linux-kernel/) that is currently being used with various host platforms based on x86, ARM64 and MIPS architectures for bringing the network connectivity using Qualcomm modems and WLAN devices. 
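On a host where the MHI host stack has enumerated such a device, the bus and its channel devices become visible through sysfs; the one-liner below is only a hedged illustration, since the exact device and channel names depend on the modem and kernel version in use.

```
# List MHI devices and channels exposed by the host-side stack
# (the entries seen here vary by device; names are illustrative only)
$ ls /sys/bus/mhi/devices/
```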
+ +This MHI bus support for PCIe endpoint devices will allow running the upstream kernel directly on the Qualcomm modems, such as the Snapdragon SDX55 platform, WLAN devices or boards with pairs of SoCs bridged using PCIe that used to run only the downstream kernel so far. Manivannan validated the PCIe endpoint work on the Telit FN980M modem based development board and was able to get the network connectivity over PCIe on the host using MHI IP\_SW0 channels. + +There is still some work pending for getting the data connectivity from the modem DSP and that is expected to land in the future releases. + +![List of most active non-author signoffs in the 5.19 Linux Kernel Release](/linaro-website/images/blog/5.19-kernel-non-author-signoffs-) + +In addition to being the third most active employer in terms of changesets and lines changed, Linaro was also top of the list for non-author signoffs. This demonstrates the crucial role Linaro’s Maintainers play in accepting and upstreaming patches, thus moving the Arm software ecosystem forward. + +## Shawn Guo + +As one of i.MX platform maintainers, Shawn helped to review and collect i.MX device tree and platform drivers patches for 5.19 Linux Kernel. With that effort, we have a number of i.MX8M Plus SoC based devices supported by 5.19 Kernel, i.e. Engicam i.Core MX8M Plus SoM and EDIMM2.2 Starter, Toradex Verdin i.MX8MP devices, Gateworks GW7400 series. + +## Vinod Koul + +Vinod reviewed and accepted patches for a variety of platforms across the DMAengine, PHY and SoundWire subsystems. + +### Conclusion + +Linaro’s position in the 5.19 kernel release and the number of Linaro kernel engineers featured as most active contributors is true testament to the crucial role Linaro continues to play in advancing the Arm software ecosystem through feature enablement, testing and maintenance. We look forward to Linus releasing more kernels on Arm development platforms! + +You can find out more about Linaro’s role in the Linux kernel by checking out our [Upstream Maintainership project page](https://linaro.atlassian.net/wiki/spaces/UM/overview). diff --git a/src/content/blogs/linaro-forge-19-1-introducing-forge-ultimate-edition-and-region-profiling-capabilities.mdx b/src/content/blogs/linaro-forge-19-1-introducing-forge-ultimate-edition-and-region-profiling-capabilities.mdx new file mode 100644 index 0000000..36c6a6f --- /dev/null +++ b/src/content/blogs/linaro-forge-19-1-introducing-forge-ultimate-edition-and-region-profiling-capabilities.mdx @@ -0,0 +1,45 @@ +--- +title: 'Linaro Forge 19.1: Introducing "Forge Ultimate" edition and region + profiling capabilities' +description: In this blog we talk about the Linaro "Ultimate" edition and the + capabilities it provides. Read more here! +date: 2019-06-25T01:38:31.000Z +image: linaro-website/images/blog/CCS_banner_image +tags: + - hpc +author: patrick-wohlschlegel +related: [] + +--- + +[Arm Forge 19.1](https://www.linaroforge.com/documentation/) is now available. This new major version includes the launch of a new Arm Forge Ultimate edition and the introduction of "region profiling", leveraging LLNL's work on Caliper. + +Introduction of Arm Forge Ultimate +By popular request, we are launching Forge Ultimate, a new edition of Forge including DDT, MAP and Performance Reports in a single bundle. This new offering has been created to answer the needs of: + +* **HPC development centres** who offer computing access to a wide range of very different end-users. 
This includes those looking for in-depth performance analysis from MAP and those seeking a simple view of the broad metrics provided by Performance Reports. These centres can now provide their users with the full capabilities of Arm's optimization product offering through a single package. +* **Professional code teams** who rely on dynamic analysis tools to feed their continuous delivery and automated testing frameworks. The ability to interface performance analysis reports with their systems helps code teams to identify and diagnose regressions much sooner. + +Forge Ultimate is the most cost-efficient way to access Arm's capabilities across all platforms. + +![forge editions table](/linaro-website/images/blog/forge-editions-table) + +# Caliper and MAP: A marriage made in heaven + +In 2014, LLNL created a general purpose application introspection system called Caliper. This tool is designed for developers working with complex workflows and workloads which combine multiple packages, solvers and libraries. Caliper helps them improve their contextual understanding of an application. Using a simple API, users annotate their code, link with the Caliper library and run their application to get valuable information. + +LLNL "regularly embed Caliper annotations in large codes developed in-house to outline domain-specific abstractions, such as kernels or physics packages", explains David Boehme, Computer Scientist at LLNL and key contributor to the Caliper project. + +![caliper image](/linaro-website/images/blog/calipher-image-forge) + +On the other hand, MAP, Arm's parallel profiler, is very data driven and collects a wide range of metrics at runtime. MAP is designed to help developers understand application performance bottlenecks and extract the last drop of performance from their applications. + +By relying on an interface between MAP and Caliper, users can now correlate region information with performance metrics and data and associate regions with their application timeline. This innovative combination of technologies helps application developers bring contextual information to their performance profiles. As David Boehme explains, "with the new Caliper support in Arm MAP, our developers can now easily navigate through MAP performance profiles using high-level abstractions rather than complex C++ call trees.” + +![caliper image 2](/linaro-website/images/blog/calipher-image-2-forge) + +Caliper is free, open-source and distributed [on Github](https://github.com/LLNL/Caliper). If you are interested, please let us know! Training materials and a technical webinar are being scheduled, and we would be delighted to send you an invitation. In the meantime, if you have time to try this new capability, please give it a good go and send us your feedback! + +# Conclusion + +I am excited to announce the availability of Arm Forge 19.1 with a new bundle and exciting new capabilities. Please get in touch to [request a trial](https://www.linaroforge.com/freeTrial/) or [buy a license](https://www.linaroforge.com/contactUs/). We plan to provide the next major release 20.0 towards the end of November 2019, with more features and improvements.
diff --git a/src/content/blogs/linaro-high-up-the-list-for-most-active-linux-kernel-contributors-in-2022.mdx b/src/content/blogs/linaro-high-up-the-list-for-most-active-linux-kernel-contributors-in-2022.mdx new file mode 100644 index 0000000..1be8938 --- /dev/null +++ b/src/content/blogs/linaro-high-up-the-list-for-most-active-linux-kernel-contributors-in-2022.mdx @@ -0,0 +1,93 @@ +--- +title: Linaro high up the list for most active Linux Kernel contributors in 2022 +description: In this blog we look at Linaro's contributions to the Linux Kernel + for 2022 as well as for the latest 6.1 Kernel Release. Read more here! +date: 2022-12-20T12:04:50.000Z +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +tags: + - linux-kernel + - testing +author: linaro +related: [] + +--- + +## Introduction + +The 6.1 Linux Kernel was released last week and featured Linaro yet again in [LWN’s lists](https://lwn.net/Articles/915435/) for most active developers and most active employers. In this blog we have asked Linaro developers to talk about the work they have done which is featured in this release. + +In LWN’s latest development statistics, they also look at who has been most active throughout the year. We are proud to say that a Linaro developer - Krzysztof Kozlowski - is the most active developer by changesets for 2022. Dmitry Baryshkov - another Linaro engineer - is also featured in the list for 20 most active developers. + +![List of most active developers 5.16 to 6.1 Linux Kernel Releases](/linaro-website/images/blog/most-active-developers-5.16-to-6.1) + +Linaro was the 6th most active employer by changesets and 4th most active by lines changed. + +![List of most active employers 5.16 to 6.1 Linux Kernel Releases](/linaro-website/images/blog/most-active-employers-5.16-6.1) + +On the list for most active maintainers and developers for non-author signoffs in 2022 we have Vinod Koul and Shawn Guo. Non-author signoffs are defined by the application of a Signed-off-by tag to a patch written by somebody else. This tends to happen when a Maintainer accepts a patch and adds it to their repository to eventually send upstream. Linaro is the second most active employer for non-author signoffs, no small feat for a company with 150 employees! + +These statistics are testament to our highly skilled Engineers and the influential roles they play in supporting open source communities and driving the Arm software ecosystem as a whole forward. + +![List of most non-author signoffs 5.16 to 6.1 Linux Kernel Releases](/linaro-website/images/blog/non-author-signoffs-5.16-6.1) + +Now let's get back to the latest 6.1 Kernel Release and find out what Linaro’s Engineers contributed with. + +![List of most active developers 6.1 Linux Kernel Release](/linaro-website/images/blog/most-active-6.1-developers-) + +## Krzysztof Kozlowski - Qualcomm Devicetree sources and bindings + +Krzysztof Kozlowski continued his work to bring down Devicetree bindings check warnings on Qualcomm SoCs DTS, so the DTS files are compliant with the DT bindings. This work led to several improvements in the Qualcomm bindings themselves (ASoC, display, pin controller, FastRPC and Slimbus) and in converting a few more bindings to new DT schema format. + +Another aspect of Krzysztof's work was on the Qualcomm Bandwidth Monitor (BWMON) driver, adding support for the second BWMON instance in SDM845 - present in the Last Level Cache Controller (LLCC). 
It monitors the current throughput between the LLCC and the memory bus, thus providing aggregated memory throughput for the entire system. BWMON then places votes for bandwidth, so the system performance levels can be adjusted to match current needs. + +Krzysztof also contributed fixes to Slimbus drivers, several other platforms and bindings. As usual, Krzysztof was an active reviewer of Devicetree bindings, earning him second place for the number of reviewer credits. + +## Dmitry Baryshkov - Qualcomm display subsystem driver + +Dmitry Baryshkov continued his work on the Qualcomm display subsystem driver (MSM DRM). During this cycle his contributions include a rework of the IOMMU usage, changing the DSI (Display Serial Interface) driver to use a common code path for both DSI panels and DSI bridges (by using the panel-bridge abstraction) and simplification of the DSI DSC (Display Stream Compression) usage for DSI panels and bridges. Dmitry is a co-maintainer of the MSM DRM driver. + +Dmitry also continued his work on converting Qualcomm clock drivers to determine their incoming clocks from DT bindings, the way all current kernel drivers are expected to behave. This time his contributions include the MSM8916, MSM8939, MSM8960/APQ8064 and MSM8660 clock controller drivers. + +Last, but not least, his contributions include a rework of the MSM8996 CPU clock drivers, working towards fixing stability issues on this platform. Work on this topic also led to contributions to the Linux Power State Coordination Interface (PSCI) implementation, easing the debugging of misbehaving PSCI platforms. + +Dmitry’s continued work on the Linux kernel is also visible over the longer term: across the v5.16 to v6.1 timeframe, he ranks 12th by the number of changesets and 11th by the number of changed lines. + +## Johan Hovold - Support for Qualcomm SC8280XP + +Johan Hovold has worked on support for the Qualcomm SC8280XP platform and the Lenovo Thinkpad X13s laptop in particular. This release includes, for example, support for functional system suspend, PCIe driver support, preparatory QMP PHY driver and devicetree-binding work, and fixes for probe-deferral issues in the MSM DRM display driver. + +## Manivannan Sadhasivam - Support added for SM8450 to the Qualcomm PCIe Endpoint controller driver + +Manivannan Sadhasivam added support for the SM8450 SoC to the Qualcomm PCIe Endpoint controller driver that he upstreamed in v6.0, along with patches improving the driver in general. This marked the first step towards establishing communication between a PCIe host and a PCIe endpoint with Qualcomm SoCs. During this process, Manivannan became a maintainer of the Qualcomm PCIe RC driver. + +Manivannan also worked on improving the Designware eDMA driver by fixing its runtime PM support, and reviewed the patches targeting the eDMA driver and the PCIe Endpoint subsystem as a whole. + +## Daniel Lezcano - Thermal Rework + +The thermal framework is currently being reworked to fix a design issue related to how trip point violations are detected. The ordering of the trip point list is not guaranteed because the thermal trip implementation is duplicated across the different drivers. Without this ordering, trip point violations cannot be handled correctly. That impacts thermal notification events, statistics and governors, especially when the temperature is jittering around a threshold.
This work will improve all the thermal drivers and simplify considerably the thermal framework code but it is a long term development which will take several releases to be completed. In addition, the userspace thermal daemons will benefit from consistent and ordered thermal events sequence which is not the case today. + +This kernel release had some fixes and cleanups regarding the simplification of the thermal OF code. The simplification introduced de facto the generic trip point into a single initialization path for the device tree based drivers. + +The monitoring loop and the locking scheme have been improved to close some race windows and simplify how the thermal mainloop watches the temperature. + +The Mellanox driver has been changed to remove the driver specific thermal decision aggregation as it was already supported by the thermal core code. + +And finally the thermal zone callback to set the trip point has been moved to the place it belongs to. + +All the thermal ARM drivers have been changed to support the generic trip points but the merge for this set of changes is postponed for the v6.3 release. + +![List of most test and review credits 6.1 Linux Kernel Release](/linaro-website/images/blog/test-and-review-credits-6.1-kernel-release) + +## Naresh Kamboju - Linaro Kernel Validation Test CI + +Naresh Kamboju, the Linaro kernel validation (LKFT) expert, continued his work in validating the various Linux subsystems by using the LKFT CI test architecture. LKFT is powered by fast builds and tests by using the Linaro Tuxsuite, Tuxmake and Tuxtriggers API tools for auto scalable parallel builds (up to 5000 concurrent jobs) for multiple architectures with various Clang and GCC toolchains. This has enabled LKFT to minimise the turn-around time for build validation and regression reports. Not only that -- reproducing bugs is now only a Tux command away! + +LKFT’s objective is kernel validation on Arm64 on Qualcomm SoC’s (DragonBoard 845C, DragonBoard 410C), ARM’s Juno-r2, FVP platforms, Hikey and Raspberry Pi 4 development boards, as well as X86 and various Qemu emulation platforms. + +While on this release LKFT and Naresh are featured prominently on the top-15 list of Tested-By’s, the work done by LKFT on testing patches and reporting problems traces back a long way before making it into Linux Mainline, as many of the reports happen on Linux Next. By letting the LKFT machinery run on a series of patches or a subsystem tree, kernel developers have access to a wide range of build architectures and to automated testing on physical hardware which would otherwise not be available to them. This imprints quality on the patches before getting merged on Mainline, like the CLK series before reaching Linux 6.1. + +![List of most active employers 6.1 Linux Kernel Release](/linaro-website/images/blog/6.1-most-active-employers) + +## Conclusion + +As can be seen from the development statistics for 2022, Linaro’s highly skilled engineers continue to play a crucial role in advancing the Arm software ecosystem. To find out more about Linaro’s role in the Linux kernel, check out our [Upstream Maintainership project page](https://linaro.atlassian.net/wiki/spaces/UM/overview). Alternatively if you would like to work with Linaro’s experts on successfully building and deploying your Arm-based product, read about [the services we provide](https://www.linaro.org/services/) here. 
diff --git a/src/content/blogs/linaro-in-openstack.mdx b/src/content/blogs/linaro-in-openstack.mdx new file mode 100644 index 0000000..be3847e --- /dev/null +++ b/src/content/blogs/linaro-in-openstack.mdx @@ -0,0 +1,58 @@ +--- +title: Linaro is now part of OpenStack +description: Linaro developers now use OpenStack as a way of sharing servers’ + resources with coworkers instead of just embedded and mobile space. Read more + here. +date: 2020-01-16T02:35:45.000Z +image: linaro-website/images/blog/DataCenter +tags: + - datacenter + - arm +author: marcin-juszkiewicz +related: [] + +--- + +During the first years of OpenStack’s existence, Arm architecture was mostly used in the embedded and mobile space. Servers were not part of the picture. + +In 2011 came the AArch64 – 64-bit architecture from Arm Ltd. Two years later the first servers arrived. Distributions started supporting new architecture, several embedded/mobile devices landed in the hands of users and developers. More and more projects added support for AArch64/Arm64 architecture. + +At that point, Linaro developers started looking at OpenStack as a way of sharing servers' resources with coworkers. + +### The first cloud + +'Liberty' was the first OpenStack release we used to run what later became Linaro Developer Cloud. There were hacks, missing features, limited permissions but it worked for us. + +And not only us – through Linaro Developer Cloud we were able to provide a way for external projects to get access to AArch64 instances. Several hundreds of teams applied and got access. It was a great success. + +But from a maintenance point of view it was a nightmare – far too many hacks and out-of-tree patches. So we looked at how to make it right. + +### Going Kolla + +In 2016 we selected the Kolla project as a way forward. An engineer was assigned (Marcin Juszkiewicz - also known as 'hrw') and cooperation started. + +Fast forward half a year and we have the first testing deployments using up-to-date code from Pike release. All built from upstream code without any extra patches or hacks. Then Queens followed with more complex scenarios. + +### Nova improvements + +In the meantime we provided Nova with some AArch64 related improvements. UEFI got set as default bootloader and it simplified life a lot (we had to set it for each image before). As graphical console support landed in libvirt and kernel, we integrated support for it as well. And then a way to configure emulated PCI Express slots as neither aarch64/virt nor x86-64/q35 machines had enough of them by default. + +### Rocky cloud + +We then decided to finally upgrade Linaro Developer Cloud, saying ‘goodbye’ to Liberty and moving forward with Queens (which was later updated to Rocky). This meant huge improvement to administrators and all users. Instead of giving bare instances, users got resources which they could split into instances using Horizon or CLI tools. + +### Going infra + +Going with Rocky allowed us to help OpenStack developers in a new way. In 2018 a set of resources was assigned and provided to the OpenStack infrastructure team. This gave access to AArch64 nodes for CI jobs. + +Running CI jobs allowed us to find new issues. Storage cluster needed improvements, network access to Chinese datacenter was a problem etc. We sorted out most of them with the move of Linaro Developer Cloud to London, UK and worked hard to get all problems resolved. + +As the amount of AArch64 nodes was small (just eight), we started working on a second instance of cloud just for OpenStack use. 
The datacenter told us that we could only get IPv6 address space. We accepted the deal and cooperated with Kolla developers on working and testing IPv6 support in Kolla and Kolla-Ansible projects. + +In the meantime we discussed with the OpenStack infrastructure team how to improve the CI situation on a small amount of nodes. To make life easier a new pipeline was created on CI – a ‘check-arm64’ one covering only AArch64 nodes. This allowed projects to add CI jobs without blocking standard set of checks. Turned out to be a good move. + +### Today + +Which brings us to today, January 2020. Linaro is now part of OpenStack MultiArch SIG which was just created to help projects work better on AArch64, Power and other non-x86 architectures. We are looking forward to cooperation here. + +What will the future bring? More nodes for sure. And more projects using them to test how they work on 64-bit Arm servers. diff --git a/src/content/blogs/linaro-in-top-five-for-most-active-contributors-to-the-6-0-linux-kernel-release.mdx b/src/content/blogs/linaro-in-top-five-for-most-active-contributors-to-the-6-0-linux-kernel-release.mdx new file mode 100644 index 0000000..f5a82c0 --- /dev/null +++ b/src/content/blogs/linaro-in-top-five-for-most-active-contributors-to-the-6-0-linux-kernel-release.mdx @@ -0,0 +1,85 @@ +--- +title: "Linaro in top five for most active contributors to the 6.0 Linux Kernel + Release " +description: "In this blog we talk about Linaro's contributions to the 6.0 Linux + Kernel Release. Read more here! " +date: 2022-10-06T02:51:13.000Z +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +tags: + - linux-kernel + - open-source + - arm +author: linaro +related: [] + +--- + +The 6.0 Linux kernel was released at the beginning of October and saw Linaro featured yet again in the top five for most active employers (according to LWN’s monthly development stats). + +![List of most active employers in the 6.0 Linux Kernel Release](/linaro-website/images/blog/6.0-most-active-employers) + +We asked the Linaro Engineers who featured top in the lists for most active developers and for test and review credits to talk about the work they did which landed them in the lists. + +![List of most active developers in the 6.0 Linux Kernel Release](/linaro-website/images/blog/6.0-most-active-developers) + +## Krzysztof Kozlowski + +**Improvements to Devicetree bindings** + +Krzysztof was working mostly on improvements in Devicetree bindings and DTS for several platforms. Most of the efforts focused on Qualcomm SoCs, but many cleanups also hit other ARM and ARM64 platforms. Krzysztof also upstreamed the Qualcomm SoC bandwidth monitoring driver (bwmon). bwmon sits between various subsystems like CPU, GPU, Last Level caches and memory subsystems. The bwmon can be configured to monitor the data throughput between memory and other subsystems. The throughput is used to request appropriate performance (and power) state. + +With 155 review tags (and 136 acks), Krzysztof was also the fourth most active reviewer. The majority of these reviews were for Devicetree bindings. + +## Dmitry Baryshkov + +**Rework of the Qualcomm QMP PHY driver** + +Dmitry’s main contribution during this cycle was a rework of the Qualcomm QMP PHY driver. This driver supports most high-speed transceivers on recent Qualcomm platforms: UFS, USB 3, PCIe, DisplayPort. 
Through the development course this driver has evolved into complex and convoluted code, containing many device and type specific hooks, which made adding support for new platforms or features close to impossible. Dmitry has split this driver into smaller pieces, cleaned up type specific code and reworked platform-specific defines. This work opens a gate to fixing several issues which were hard to spot during the previous development cycles, and adding support for newer platforms. + +The next major working item for Dmitry was the driver for Qualcomm Display Subsystem (MSM DRM). While this cycle did not bring any big improvements, Dmitry continued working on cleaning up the driver, improving support for older and current Qualcomm platforms. + +The rest of Dmitry’s work in this development cycle was dedicated to small issues with Qualcomm platforms, like fixing PCIe MSI support, improving MSM8996 platform or adding support for Inforce IFC6560 single-board computer. + +## Arnd Bergmann + +**The state of the traditional “board file” support on 32-bit machines** + +As part of Arnd’s SoC Maintainer role, he revisited the state of the traditional “board file” support on 32-bit machines. While all 2288 machines that were added in the past ten years have been based around device tree based probing, there are still around 200 32-bit arm boards left that use ATAGS based board files. Most of these never got converted to devicetree because there are no known users. After a survey started by Arnd, the 28 board files that most likely are still used were identified, and the rest were marked as unused in the kernel’s configuration system in linux-6.0, with the plan to remove them in linux-6.2 if no other users are found until then. + +Arnd also worked on finishing the cleanup of the pci\_mmap\_resource\_range() in-kernel interface across all CPU architectures, on eliminating the virt\_to\_bus() interface that was deprecated over 20 years ago, and on removing the arm32 “dmabounce” code that blocked the arm32 DMA mapping code from using the same infrastructure as all other architectures. + +The SoC tree contained a total of over 1000 patches in the 6.0 release that Arnd merged from downstream maintainers and [forwarded to Linus Torvalds](https://lore.kernel.org/linux-arm-kernel/20220802140200.3987874-1-arnd@kernel.org/t/#u). This includes support for two new SoC families: Nuvoton NPCM8XX and Sunplus SP7021. + +## Viresh Kumar + +**Redesigned the OPP core's platform specific configuration interface** + +As part of Viresh's OPP Maintainer role, he redesigned the OPP core's, platform specific, configuration interface in order to provide a simpler interface for platforms requiring multiple configurations. Previously the platforms had to call a resource specific helper, once for each configuration. With the recent changes, a single call is enough to take care of all configurations. This work was instrumental in providing support for multiple clocks per device in the OPP core, which is currently required for Qualcomm SoCs. Viresh also worked on patches to provide the multiple clock support along with Krzysztof Kozlowski. + +![6.0 test and review credits](/linaro-website/images/blog/6.0-test-and-review-credits) + +## Manivannan Sadhasivam + +**Reviewed and tested PCIe patches** + +Manivannan helped review and test the PCIe patches improving the Synopsys Designware (DWC) PCIe controller driver. 
The patches also served as preparation for adding Embedded DMA (eDMA) support to the DWC PCIe controller driver, for offloading PCIe read/write operations to the host memory from the endpoint device. + +## Overall Testing Statistics + +Linaro consistently ranks in the top ten companies when it comes to reviews, testing and reporting of regressions. We asked Naresh Kamboju - Linux Kernel Validation Engineer at Linaro - to share some statistics on our contributions. + +### Reviewed by - Linaro in 6th place + +Around 81 companies contributed to the v6.0 kernel release and [Linaro secured 6th position](https://remword.com/kps_result/5.20_review.html) for “Reviewed-by” tags, contributing 571 Reviewed-by tags, an improvement of around 25% over the previous release. + +![reviewed by stats 6.0 kernel release](/linaro-website/images/blog/reviewed-by-stats-6.0-kernel-release) + +### Tested by - Linaro in 7th place + +Around 55 companies contributed to the v6.0 kernel release and [Linaro secured 7th position](https://remword.com/kps_result/5.20_test.html) for “Tested-by” tags, contributing 75 Tested-by tags, an improvement of around 300% over the previous release. + +![tested by stats 6.0 kernel release](/linaro-website/images/blog/tested-by-stats-6.0-kernel-release) + +## Conclusion + +Through feature enablement, testing and maintenance, Linaro engineers continue to play a crucial role in advancing the Arm software ecosystem. To find out more about Linaro’s role in the Linux kernel, check out our [Upstream Maintainership project page](https://linaro.atlassian.net/wiki/spaces/UM/overview). diff --git a/src/content/blogs/linaro-introduces-the-first-parallel-debugger-for-python.mdx b/src/content/blogs/linaro-introduces-the-first-parallel-debugger-for-python.mdx new file mode 100644 index 0000000..ba146cd --- /dev/null +++ b/src/content/blogs/linaro-introduces-the-first-parallel-debugger-for-python.mdx @@ -0,0 +1,87 @@ +--- +title: Linaro introduces the first parallel debugger for Python +description: In this blog we talk about the first parallel debugger for Python + introduced by Linaro. Read more! +date: 2020-07-22T07:30:19.000Z +image: linaro-website/images/blog/CCS_banner_image +tags: + - hpc +author: patrick-wohlschlegel +related: [] + +--- + +# Introduction + +Linaro announces the release of Linaro Forge 20.1 featuring various enhancements and bug fixes across all products. In particular, this new release includes: + +* Native parallel debugging of Python applications in DDT +* Improvements to performance analysis of Nvidia GPUs in both MAP and Performance Reports +* Simplifications to our packaging, through the integration of Performance Reports into the Forge installation files +* Support for the latest development environments for Arm-based servers. + +For more in-depth information, including a full breakdown of the latest features and bug fixes, please see the release notes history. + +# Python Debugging + +Attach to mpi4py and serial applications by inserting `%allinea_python_debug%` into the command line: + +`bin/ddt mpirun -np 2 python3 %allinea_python_debug% python-blog.py` + +Or + +`bin/ddt python3 %allinea_python_debug% python-blog.py` + +The MPI version drops you into a stack starting at "`import mpi4py`". Step-in, step-out and step-over in Python code all work in the same way as in C, C++ and Fortran.
+ +![linaro ddt python debugging](/linaro-website/images/blog/linaro-ddt-python-debugging-1) + +All the same advanced breakpoint features as C and C++ are supported including function name, conditional and triggering every N hits breakpoints. +Setting a breakpoint in a loop with the condition i == 10 stops at that point, as can be seen from the local variables: + +![linaro ddt python debugging](/linaro-website/images/blog/linaro-ddt-python-debugging-2) + +The evaluation window can be used to inspect globals, locals or even execute Python expressions in the selected frame: + +![linaro ddt evaluation window](/linaro-website/images/blog/linaro-ddt-evaluation-window) + +As well as debugging Python, the stack also shows a merged view of Python and native code. So the steps that led up to some native code being executed are visible. For example, here is what the stack looks like when pausing in a numpy dot product which uses BLAS under the hood. Registers can be inspected and instructions can be stepped over using Forge's assembly debugging mode: + +![linaro forge's assembly debugging mode](/linaro-website/images/blog/linaro-forge-s-assembly-debugging-mode) + +# Performance Reports and Forge Integration + +Performance Reports is now distributed with Forge as a single combined installation, launched via `bin/perf-report` in the Forge installation directory. + +We have also renamed some lesser-used or behind-the-scenes binaries and scripts to be more appropriate. While this will not affect most users, any users of manual launch should use `forge-client` instead of (`ddt-client` or `allinea-client`) and users of .qtf scripts should use `forge-mpirun` in place of `ddt-mpirun`. + +# Revamp of GPU Metrics + +CUDA 10.2 and GPU Metrics are now supported on x86\_64 and PowerPC. We have removed the "GPU Temperature" and "Time Spent in Global Memory Accesses" metrics to provide a more stable metric collection mechanism that is consistent across supported platforms. + +GPU Utilization, GPU Memory Usage and GPU Power Usage are collected once the NVIDIA Management library is installed ([https://developer.nvidia.com/nvidia-management-library-nvml](https://developer.nvidia.com/nvidia-management-library-nvml)). Warp Stall Reasons and Line metrics are collected using MAP's CUDA Kernel Analysis feature based on CUPTI, CUDA's profiling interface. MAP supports profiling compiler optimized code but it is necessary to compile with the flag `-lineinfo` to use MAP's CUDA Kernel Analysis feature. CUDA Kernel Analysis can be enabled with the GUI's Run Dialog or with the command line using `--cuda-kernel-analysis`. An example workflow is + +``` +$ nvcc -O3 -g -lineinfo cuda_app.cu -o cuda_app +$ map --profile --cuda-kernel-analysis cuda_app +``` + +The following is a MAP profile of CloverLeaf\_CUDA on Oak Ridge National Laboratory's Summit. It demonstrates both GPU Metric collection and CUDA Kernel Analysis on PowerPC. + +![gpu metrics image](/linaro-website/images/blog/gpu-metrics-image) + +# Graphical Interface Refresh + +Forge has been updated to Qt 5, which means a crisper and more performant GUI, as well as bug fixes and stability improvements. In particular, macOS is better supported when in dark appearance mode. + +# Documentation + +## Developer and reference guides + +* Linaro Performance Reports is a merged component of the Linaro Forge product from version 20.1 onwards. 
+ The Linaro Performance Reports user guide is now combined with the Linaro Forge user guide, and is available from [https://developer.arm.com/docs/101136/latest](https://developer.arm.com/documentation/101136/latest). +* Linaro License Server user guide is available from [https://developer.arm.com/docs/101169](https://developer.arm.com/documentation/101169/latest/). + +# Conclusion + +Despite the very unusual times we are all experiencing, the team has been able to push new, innovative features. With this release, Linaro is the first company to release a parallel debugger for Python which includes all the features one would expect. We are looking forward to hearing what you think - just click on the button below! diff --git a/src/content/blogs/linaro-releases-ledge-reference-platform-v0-2.mdx b/src/content/blogs/linaro-releases-ledge-reference-platform-v0-2.mdx new file mode 100644 index 0000000..eab26c2 --- /dev/null +++ b/src/content/blogs/linaro-releases-ledge-reference-platform-v0-2.mdx @@ -0,0 +1,60 @@ +--- +title: "Linaro releases LEDGE Reference Platform v0.2 " +description: " This blog talks about the LEDGE Reference Platform v0.2 release + and what new features users can expect to see. " +date: 2021-05-20T09:03:26.000Z +image: linaro-website/images/blog/IoT_Planet_UNDER_2MB +tags: + - arm + - u-boot + - linux-kernel +author: ilias-apalodimas +related: [] + +--- + +The majority of consumer electronics now consist of abandoned firmware paired with an old kernel version. To make matters worse, those devices rarely support updates of either their firmware or software. There is a need for best practices for Firmware-OS interaction and its security extensions, to increase the security and maintainability of those devices. + +# Introducing LEDGE Reference Platform + +In an effort to empower any organization to easily create a Linux distribution for vertical markets, focusing on the high level features of the operating system, Linaro has created the LEDGE Reference Platform. LEDGE Reference Platform (RP) is a lightweight, highly secure and robust container runtime environment that has dependable boot and update capabilities. It comes with a full set of security policies with SELinux, IMA (Linux Kernel Integrity Measurement Architecture) and other technologies and builds on [SystemReady-IR](https://developer.arm.com/architectures/system-architectures/arm-systemready/ir) and [EBBR](https://arm-software.github.io/ebbr/) specifications. Consumers of LEDGE-RP complement it with market specific components to make vertically integrated solutions. + +To see LEDGE RP in action, [check out this demo](https://www.youtube.com/watch?v=otciKqA0hdQ) from Linaro Virtual Connect Spring 2021 where Linaro’s Maxim Uvarov demoed LEDGE RP on the STM32MP1 to showcase UEFI key provisioning and direct booting of Linux. + +# What is new in LEDGE RP v0.2 release? + +Earlier this week we released LEDGE Reference Platform v0.2 which you can [download here](http://releases.linaro.org/components/ledge/rp-0.2/). The release contains many new features.
The majority of the new features enhance the overall platform security, adding authentication and attestation mechanisms based on established standards: + +* UEFI SecureBoot enabled on all platforms +* UEFI variable management in the secure world via OP-TEE (AArch64 only for now), which provides secure rollback protected storage for critical system variables +* PARSEC support for TPMv2 (tested with fTPM), allowing security APIs in the language of choice to be mapped to security primitives found in various hardware. +* Arch agnostic UEFI protocol for loading the initrd, allowing users to boot multiple kernels without GRUB. +* Updated OpenEmbedded to the latest Dunfell release +* U-Boot updated to 2021.01 +* Linux kernel updated to 5.8 +* OP-TEE updated to 3.12 + +# Supported Platforms + +Although by design LEDGE-RP will run on any EBBR compliant platform, there are a number of devices that are officially supported. For these platforms Linaro provides Trusted Substrate firmware binaries that adhere to the [SystemReady-IR](https://developer.arm.com/architectures/system-architectures/arm-systemready/ir) and [EBBR](https://arm-software.github.io/ebbr/) specifications. [Trusted Substrate](/automotive-iot-and-edge-devices/) is a collaborative project for the integrated, tested and packaged foundation of open source secure boot and trusted execution elements. + +* QEMU AArch64 (U-Boot and EDK2 support) +* QEMU Armv7 (U-Boot and EDK2 support) +* QEMU x86 (EDK2 only) +* [STM32MP157C](https://www.st.com/en/evaluation-tools/stm32mp157c-dk2.html) (U-Boot only) +* [SynQuacer DeveloperBox](https://www.96boards.org/product/developerbox/) (EDK2, U-Boot support is WIP) +* BeagleBoard X15 (U-Boot only) + +Linaro has a track record of bringing Arm vendors together on the Linux kernel and is currently extending this to firmware with Trusted Substrate. + +# Future plans + +The next LEDGE Reference Platform release will be available in six months' time. By the upcoming release we expect to see the following features realised: + +* Fully integrated A/B partition support for reference, providing anti-bricking and rollback protections +* Standardized firmware upgrades using open source tools (fwupd) +* UEFI Measured Boot support combined with disk encryption based on TPMv2, with LUKS encryption by an authorized PCR policy for increased security +* Product quality low power networking with normal tools rather than through debug file system control +* TPM for devices with RPMB support. This will provide platforms without a discrete TPM with a viable alternative + +For more information on the LEDGE Reference Platform click [here](https://github.com/Linaro/ledge-oe-manifest). To find out more about Linaro and the work we do, make sure to [contact us](https://www.linaro.org/contact/). diff --git a/src/content/blogs/linaro-s-rust-based-hypervisor-agnostic-vhost-user-i2c-backend.mdx b/src/content/blogs/linaro-s-rust-based-hypervisor-agnostic-vhost-user-i2c-backend.mdx new file mode 100644 index 0000000..7a25edd --- /dev/null +++ b/src/content/blogs/linaro-s-rust-based-hypervisor-agnostic-vhost-user-i2c-backend.mdx @@ -0,0 +1,133 @@ +--- +title: Linaro’s Rust based hypervisor-agnostic vhost-user I2C backend +description: "In this article, Viresh Kumar talks about the work Linaro is doing + to develop hypervisor-agnostic abstract devices which enable all + architectures.
" +date: 2022-05-17T00:15:46.000Z +image: linaro-website/images/blog/Banner_Virtualization +tags: + - open-source + - automotive +author: viresh-kumar +related: [] + +--- + +There is a growing trend towards virtualization in areas other than the traditional server environment. The server environment is uniform in nature, but as we move towards a richer ecosystem in automotive, medical, general mobile, and the IoT spaces, richer device abstractions are needed. [Linaro's Project Stratos](https://www.linaro.org/projects#automotive-iot-edge-devices_STR) is working towards developing hypervisor-agnostic abstract devices, leveraging virtio and extending hypervisor interfaces and standards to enable all architectures. + +# An open interface for guest virtual machines + +The Virtual Input/Output device (Virtio) standard provides an open interface for guest [virtual machines (VMs)](https://en.wikipedia.org/wiki/Virtual_machine). The standard provides for common devices, such as network and block storage, which have been designed for efficient performance in a paravirtualized environment. This is achieved by minimizing the number of potentially expensive context switches involved in any given device transaction. The open standard provides an extensible interface that can be implemented in a wide range of environments regardless of the choice of OS. + +# How does Virtio work? + +Virtio adopts a frontend-backend architecture that enables a simple but flexible framework. The backend (BE) virtio driver, implemented by the hypervisor running on the host, exposes the virtio device to the guest OS through a standard transport method, like [PCI](https://en.wikipedia.org/wiki/Peripheral_Component_Interconnect) or [MMIO](https://en.wikipedia.org/wiki/Memory-mapped_I/O). This virtio device, by design, looks like a physical device to the guest OS, which implements a frontend (FE) virtio driver compatible with the virtio device exposed by the +Hypervisor. The virtio device and driver communicate based on a set of predefined protocols as defined by the [virtio specification](https://github.com/oasis-tcs/virtio-spec), which is maintained by [OASIS](https://www.oasis-open.org/org/). The FE driver may implement zero or more Virtual queues (virtqueues), as defined by the virtio specification. The virtqueues are the mechanism of bulk data transport between FE (guest) and BE (host) drivers. These are normally implemented as standard ring buffers in the guest physical memory space. The BE drivers parse the virtqueues to obtain the request descriptors, process them and queue the response descriptors back to the virtqueue. The BE drivers are responsible for making sure the data from the transaction is processed; either by forwarding to real HW or some sort of device emulation. + +The FE virtio drivers and the virtio specification itself are not concerned with where virtqueue processing happens on the host. As such descriptors can be processed in user-space or kernel-space. Virtqueue processing can be offloaded to other entities such as a user daemon or kernel module by following the vhost protocol, which is referred to as "vhost-user" when implemented in user-space. The remainder of this article presents the implementation of a hypervisor-agnostic vhost-user I2C daemon recently introduced to the rust-vmm project. 
+ +# Implementing the Virtio I2C Specification + +[The Virtio I2C specification](https://github.com/oasis-tcs/virtio-spec/blob/master/virtio-i2c.tex) and a [Linux i2c-virtio driver](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/drivers/i2c/busses/i2c-virtio.c?id=3cfc88380413d20f777dc6648a38f683962e52bf) have recently been upstreamed by Jie Deng. Both the specification and the driver have received further improvements from the author to enhance buffer management and support zero-length transactions. + +`virtio-i2c` is a virtual I2C adapter device, which provides a way to flexibly organize and use the host's I2C controlled devices from the guest. All communication between the FE and BE drivers happens over the `requestq` virtqueue. The I2C requests always originate at the guest FE driver, where the FE driver puts one or more I2C requests, represented by `struct virtio_i2c_req`, on the `requestq` virtqueue. The I2C requests may or may not be interdependent. If multiple requests are received together, then the host BE driver must process the requests in the order they are received on the virtqueue. + +``` +---- +struct virtio_i2c_req { + struct virtio_i2c_out_hdr out_hdr; + u8 buf[]; + struct virtio_i2c_in_hdr in_hdr; +}; +---- + +``` + +Each I2C virtio request consists of an `out_hdr`, followed by an optional data buffer of some length, followed by an `in_hdr`. The buffer is not sent for zero-length requests, such as the SMBus `QUICK` command where no data is required to be sent or received. + +``` +---- +struct virtio_i2c_out_hdr { + le16 addr; + le16 padding; + le32 flags; +}; +---- + +``` + +The `out_hdr` is represented by `struct virtio_i2c_out_hdr` and is always set by the FE driver. The `addr` field of the header is set with the address of the I2C controlled device. Both 7-bit and 10-bit address modes are supported by the specification, though only 7-bit mode is supported by the current implementation of the Linux FE driver. The `flags` field is used to show a dependency between multiple requests, by setting `VIRTIO_I2C_FLAGS_FAIL_NEXT` (0b01), or to mark a request as `READ` or `WRITE`, by setting `VIRTIO_I2C_FLAGS_M_RD` (0b10) for a `READ` operation. + +As described earlier, `buf` is optional. The virtio I2C specification defines a feature for zero-length transfers, `VIRTIO_I2C_F_ZERO_LENGTH_REQUEST` (0b01). It is mandatory for both FE and BE drivers to implement this feature, which allows zero-length transfers (like the SMBus `QUICK` command) to take place. + +For `WRITE` transactions, the buffer is set by the FE driver and read by the BE driver. For `READ` transactions, it is set by the BE driver and read by the FE driver after the response is received. The amount of data to transfer is inferred from the size of the buffer descriptor. + +``` +---- +struct virtio_i2c_in_hdr { + u8 status; +}; +---- + +``` + +The `in_hdr` is represented by `struct virtio_i2c_in_hdr` and is used by the host BE driver to notify the guest of the status of the transfer, with `VIRTIO_I2C_MSG_OK` (0) or `VIRTIO_I2C_MSG_ERR` (1). + +Please refer to [the Virtio I2C specification](https://github.com/oasis-tcs/virtio-spec/blob/master/virtio-i2c.tex) for more details. + +## Rust based I2C backend + +Rust, a multi-paradigm, general-purpose programming language, is attracting a lot of interest in the Linux world due to its strong emphasis on performance and safety.
It brings a lot of benefits to the table, especially [memory safety](https://en.wikipedia.org/wiki/Memory_safety) and safe [concurrency](https://en.wikipedia.org/wiki/Concurrency). The Rust for Linux project is looking to bring those safety benefits to a subset of the kernel's code. + +[The rust-vmm project](https://github.com/rust-vmm), an open-source initiative, was started back in late 2018, with the aim of sharing virtualization packages. The rust-vmm project lets one build custom [Virtual Machine Monitors (VMMs) and hypervisors](https://en.wikipedia.org/wiki/Hypervisor). This empowers other projects to quickly develop virtualization solutions, by reusing the components provided by rust-vmm, and better focus on the key differentiators of their products. The rust-vmm project is organized as a shared ownership project, which so far includes contributions from Alibaba, AWS, Cloud Base, Google, Intel, Linaro, Red Hat and other individual contributors. The components provided by rust-vmm are already used by several projects, like Amazon's [Firecracker](https://github.com/firecracker-microvm/firecracker) and [Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor), started by Intel. The rust-vmm project currently hosts \~30 repositories (or Rust crates, the Rust equivalent of a C library), where each crate plays a specialized role in the development of a fully functioning VMM. + +One such component provided by the rust-vmm project is the vhost-user-backend crate, which has recently made its way to crates.io, the Rust community’s crate registry. The vhost-user-backend crate provides a framework to implement vhost-user backend services. It provides the necessary public APIs to support vhost-user backends, like a daemon control object (`VhostUserDaemon`) to start and stop the service daemon, a vhost-user backend trait (`VhostUserBackendMut`) to handle vhost-user control messages and virtio messages, and a vring access trait (`VringT`) to access virtio queues. A Rust trait tells the Rust compiler about functionality a particular type has and can share with other types. + +The [vhost-device](https://github.com/rust-vmm/vhost-device) workspace was recently created in the rust-vmm project to host per-device vhost-user backend crates. As of this writing, it contains the I2C device crate, but others such as GPIO, RNG, VSOCK, SCSI and [RPMB](https://en.wikipedia.org/wiki/Replay_Protected_Memory_Block) are currently being developed and reviewed. + +The I2C vhost-device binary crate (a binary crate generates an executable upon build) supports sharing host I2C busses (adapters) and client devices with multiple guest VMs at the same time, using a single instance of the backend daemon. Once the vhost-device crate is compiled with the `cargo build --release` command, it generates the `target/release/vhost-device-i2c` executable. The `vhost-device-i2c` daemon communicates with guest VMs over Unix domain sockets, a unique socket for each VM. + +The daemon accepts these arguments: + +* -s, --socket-path: Path of the vhost-user Unix domain sockets. This is suffixed with 0,1,2..socket\_count-1 by the daemon to obtain the actual socket paths. +* -c, --socket-count: Number of sockets (guests) to connect to. This parameter is optional and defaults to 1.
+* -l, --device-list: List of I2C busses and clients in the format `<bus>:<client_addr>[:<client_addr>][,<bus>:<client_addr>[:<client_addr>]]` + +As an example, consider the following command: + +``` +---- +./vhost-device-i2c -s ~/i2c.sock -c 6 -l 6:32:41,9:37:6 +---- + +``` + +This will start the I2C backend daemon, which will create 6 Unix domain sockets (`~/i2c.sock0`, .. `~/i2c.sock5`), in order to communicate with 6 guest VMs, where communication with each VM happens in parallel with the help of a separate native OS thread. Each thread, once created by the daemon, will wait for a VM to start communicating over the thread's designated socket. Once a VM is found for the thread, the thread registers a `vhost-user-backend` instance and starts processing the requests on the `requestq` virtqueue. At a later point in time, once the VM shuts down, the respective thread starts waiting for a new VM to communicate on the same socket path. In the above example, the daemon is also passed a list of host I2C busses and client devices, which are shared among the VMs. This is how sharing is defined in the daemon's implementation for now, though it can be modified later on, if required, to allow specific devices to be accessed only by a particular VM. In the above example, the devices provided by the host to the daemon are: devices with addresses 32 and 41 attached to I2C bus 6, and 37 and 6 attached to I2C bus 9. The daemon extensively validates the device-list at initialization to avoid any failures later, especially for duplicate entries. + +The `vhost-user-i2c` daemon supports both I2C and SMBus protocols, though only basic SMBus commands up to word transfers. The backend provides `pub trait I2cDevice`, a public Rust trait, which can be implemented for different host environments to provide access to the underlying I2C busses and devices. This is currently implemented only for Linux user-space, where the I2C busses and devices are accessed via the `/dev/i2c-X` device files. For the above example, the backend daemon will look for the `/dev/i2c-6` and `/dev/i2c-9` device files. Users may need to load the standard `i2c-dev` kernel module on the host machine, if not loaded already, for these device files to be available under `/dev/`. For a different host environment, like with a bare-metal type 1 hypervisor, we need to add another implementation of the trait depending on how the I2C busses and devices are accessed. + +# Conclusion + +The `vhost-user-i2c` backend is truly a hypervisor-agnostic solution that works with any hypervisor which understands the vhost-user protocol. It has been extensively tested with QEMU, for example, in a Linux user-space environment. There has already been [a proof of concept implementation](https://www.google.com/url?q=https://connect.linaro.org/resources/lvc21/lvc21-314/\&sa=D\&source=docs\&ust=1652795535690140\&usg=AOvVaw3w2Bq_ENjfUe6ZYzICf7mO) of servicing a virtio-block device from a Xen guest. Work is in progress to make the Xen hypervisor vhost-user protocol compatible. Once that is achieved, we will be able to use the same `vhost-user-i2c` executable with both QEMU and Xen, for example, under the same host environment.
+ +Support for i2c-virtio is already merged in the QEMU source. This provides the boilerplate needed to create the i2c-virtio device in the guest kernel, which can be done by adding the following command line arguments to your QEMU command: + +``` +---- +-chardev socket,path=~/i2c.sock0,id=vi2c -device vhost-user-i2c-device,chardev=vi2c,id=i2c +---- + +``` + +We have come a long way forward with the I2C vhost-user device implementation in the [vhost-device](https://github.com/rust-vmm/vhost-device) workspace. There is still a lot to do though, especially testing the same vhost-user backend executables with multiple hypervisors and adding support for more device crates. To find out more about the work we do on Rust based hypervisor-agnostic backends, check out our [Project Stratos page](https://linaro.atlassian.net/wiki/spaces/STR/overview). diff --git a/src/content/blogs/linaro-tech-days-a-livestream-event-of-technical-sessions.mdx b/src/content/blogs/linaro-tech-days-a-livestream-event-of-technical-sessions.mdx new file mode 100644 index 0000000..64c8cb6 --- /dev/null +++ b/src/content/blogs/linaro-tech-days-a-livestream-event-of-technical-sessions.mdx @@ -0,0 +1,28 @@ +--- +title: "Linaro Tech Days: A livestream event of technical sessions" +date: 2020-03-13T07:08:16.000Z +image: linaro-website/images/blog/30921188158_953bca1c9f_k +tags: + - linaro-connect + - arm + - open-source +author: connect +related: [] + +--- + +We were disappointed to have to [cancel Linaro Connect Budapest 2020](/blog/linaro-connect-budapest-2020-cancelled/). We had lots of great technical sessions scheduled and as such, want to do all we can to repurpose as much of the content as possible. + +We are therefore pleased to introduce Linaro Tech Days. + +Linaro Tech Days are a series of technical sessions that will be presented live online for anyone to join. Recordings and slides will be made available on [our Resources page](https://resources.linaro.org/) for those who are not able to join. + +When: Tuesday 24 - Wednesday 25 March 2020 + +There will be two tracks over 4 hours each day. We are planning on scheduling additional Linaro Tech Day sessions in the following weeks and will announce more details shortly. + +For more information about Linaro Tech Days, please go to [the Connect page](/connect/) where you can view the schedule. Please note that to access joining instructions for each remote session, you will need to register. + +We hope to see you there! + +The Linaro Connect Team diff --git a/src/content/blogs/linaro-to-present-on-embedded-ai-and-virtualization-at-embedded-world-2022.mdx b/src/content/blogs/linaro-to-present-on-embedded-ai-and-virtualization-at-embedded-world-2022.mdx new file mode 100644 index 0000000..4f48f75 --- /dev/null +++ b/src/content/blogs/linaro-to-present-on-embedded-ai-and-virtualization-at-embedded-world-2022.mdx @@ -0,0 +1,38 @@ +--- +title: Linaro to present on Embedded AI and Virtualization At Embedded World 2022 +description: "In this blog Bill Fletcher talks about the two sessions Linaro + will present at Embedded World 2022 on Confidential AI and Virtualization in + Automotive. " +date: 2022-05-10T08:06:39.000Z +image: linaro-website/images/blog/ConAI_promo +tags: + - ai-ml + - iot-embedded + - virtualization + - automotive +author: bill-fletcher +related: [] + +--- + +Linaro will be presenting two important aspects of its ongoing engineering work at [the Embedded World Conference](https://www.embedded-world.de/en) at the end of June.
Keep reading to find out more about each individual session. + +## Embedded Hypervisor: Ready for Prime Time? + +**Wednesday 22 June, 16.00 GMT +2** + +Francois Ozog from Linaro’s Edge Computing Group will be speaking about virtualization in embedded markets and in particular for automotive. Making sure that deterministic workloads can be operating as designed and migrating them to a backup zone in the context of a Software Defined Vehicle (SDV) can be quite a challenge for embedded product designers. This session will cover four themes to help architects get clarity on virtualization in their field: realtime, functional safety, new recipes, and confidential computing. Read more about [Software Defined Vehicles and the need for standardization in our white paper](https://static.linaro.org/assets/automotive_white_paper_0921.pdf). + +## Securing Embedded AI with Open Source Firmware + +**Thursday 23 June, 13.45 GMT +2** + +Bill Fletcher and Kevin Townsend from Linaro’s IoT Group will be speaking about Linaro’s Confidential AI Project in the conference session “Securing Embedded AI with Open Source Firmware”. Linaro’s IoT group primarily works on microcontrollers both on latest member company hardware and in advance of hardware on virtual platforms. The scope of the talk covers all aspects which can be protected by firmware mechanisms; the specifics of secure enclave execution of ML inference to protect both models and data; secure model storage, and configuration and update mechanisms. To find out more about the work we do in this space, check out [our white paper on Confidential AI for MCUs](https://static.linaro.org/assets/ConfidentialAI-LinaroWhitePaper.pdf). + +Linaro works with businesses and open source communities to develop software on Arm-based technology. We create solutions that drive forward the Arm software ecosystem, enhance standardisation, promote collaboration across industries and contribute to real-world applications. Linaro provides: + +* A unique safe haven for inter-company engineering collaboration with IPR policies, antitrust provisions, business neutrality and public articles of association +* Upstream open source focus - building the ecosystem with friction-free distribution, maximised ecosystem participation, established non-proprietary industry standards, cross-platform inclusivity, long-term support capability +* Collaboration processes and infrastructure for outreach, hosting, project lifecycle, operations, distributed multi-party multi-codebase project management + +If you’d like to schedule a meeting with the Linaro team face-to-face at Embedded World, please contact us at **confidential\_ai@linaro.org**. We hope to see you there! diff --git a/src/content/blogs/lldb-15-and-the-mystery-of-the-non-address-bits.mdx b/src/content/blogs/lldb-15-and-the-mystery-of-the-non-address-bits.mdx new file mode 100644 index 0000000..9b628d2 --- /dev/null +++ b/src/content/blogs/lldb-15-and-the-mystery-of-the-non-address-bits.mdx @@ -0,0 +1,354 @@ +--- +title: LLDB 15 and the Mystery of the Non-Address Bits +description: In this article David talks about the work Linaro has done on + non-address bits in LLDB. +date: 2022-09-08T01:17:07.000Z +image: linaro-website/images/blog/Banner_Linux_Kernel +tags: + - open-source + - toolchain +author: david-spickett +related: [] + +--- + +You’ve got a brand new Arm system. Lucky you! You reach for your favourite tools from the LLVM project. Including the debugger, [LLDB](https://lldb.llvm.org/). 
+ +You’re debugging a simple C program and something odd happens. + +``` +(lldb) p buf +(char *) $0 = 0xa951fffff7ff8000 "" +(lldb) memory read buf +0xfffff7ff8000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ +``` + +Where did the upper bits of the pointer go? It was 0xa951fffff7ff8000, then it was 0xfffff7ff8000. + +If you use a 64 bit Arm system that has [Top Byte Ignore](https://developer.arm.com/documentation/den0024/a/ch12s05s01), [Pointer Authentication](https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/armv8-a-architecture-2016-additions) or [Memory Tagging](https://www.linaro.org/blog/debugging-memory-tagging-with-lldb-13/), this might happen to you. Have no fear! Nothing is wrong and this can all be explained. + +Linaro has been contributing to LLDB since 2015 and most recently we, along with the LLDB community, have been focused on handling “non-address bits”. That’s what we’re going to delve into in this post. + +For the full list of changes in LLDB 15 go to https://releases.llvm.org/15.0.0/docs/ReleaseNotes.html#changes-to-lldb. + +# What are Non-Address Bits? + +The first thing to know is that the system I used in that example has 48 bit virtual addresses, which is set at the hardware level. + +Why not 64? Well, the likelihood of a single process actually wanting to map 2^64 bytes (16384 petabytes) of memory is zero (corrupted parameters passed to malloc don’t count). To give you an idea of scale, the AArch64 based [Fugaku super computer](https://www.fujitsu.com/global/about/innovation/fugaku/specifications/) has 4.8 petabytes of memory and that is split across 158,976 nodes (32GiB per node). + +Even if you would benefit from the capacity, what about the power use, the cost, the physical size of the modules? How will you get all that data into a core quickly? How many cores will share access to the memory and how will you stop them conflicting? + +Ultimately would you rather have many nodes with a smaller amount of memory each, for much simpler handling of all those concerns? This is what leads to the decision that 64 bits of virtual address space isn’t needed. + +Note: There is the option to go to 52 bit by using the Large Virtual Address (LVA) extension, taking us up to 4 petabytes. Not quite 1 Fugaku. + +With 48 bits we can address 256 terabytes of memory which is going to be more than enough for most applications. Which gives us some free bits in our pointers along with a host of hardware design benefits I’m not qualified to talk about. + +The 16 free bits can be used for: + +* [Top byte ignore](https://developer.arm.com/documentation/den0024/a/ch12s05s01) (aka TBI). An Armv8-a feature where the top byte of the pointer is ignored by the hardware. +* [Memory Tagging](https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/enhanced-security-through-mte) (aka MTE). An Armv8.5-a feature uses that top byte to store a 4 bit tag. This tag is used to detect memory safety issues. I wrote about this previously in “[Debugging Memory Tagging with LLDB 13](https://www.linaro.org/blog/debugging-memory-tagging-with-lldb-13/)”. +* [Pointer Authentication](https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/armv8-a-architecture-2016-additions) (aka PAuth). An Armv8.3-a feature that allows you to sign a pointer so that it can be verified at a later time. This uses the remaining unused bits. + +All of that is done without increasing the program’s memory usage. 
Take MTE for example: one software equivalent is [Address Sanitizer](https://source.android.com/docs/security/test/asan) (ASAN). ASAN has a memory usage overhead on the order of 2x (though MTE does have its own costs outside of memory usage).
+
+# Back to the Point(er)
+
+Let’s look at the layout of a pointer on this system.
+
+![Image of a layout of a pointer](/linaro-website/images/blog/layout-of-a-pointer-)
+
+Plugging in the address from the example in the introduction, we get:
+
+* Top 4 bit tag 0xa
+* Memory tag 0x9
+* Pointer signature 0x51
+* Virtual address 0xfffff7ff8000
+
+So now you see how LLDB went from a pointer with value 0xa951fffff7ff8000 to reading memory at 0xfffff7ff8000.
+
+LLDB refers to these non virtual address bits as “non-address bits”. Whenever it needs to know the real address a pointer refers to, it removes them. The obvious case is when accessing memory but this applies in many more situations.
+
+# How Can I Use Non-Address Bits?
+
+The simplest way to do this is to write a program targeting an AArch64 Linux system. Top Byte Ignore has always been [enabled for user space](https://www.kernel.org/doc/Documentation/arm64/tagged-pointers.txt) and you can use it on existing v8.0-a hardware. TBI can be used with pointer arithmetic directly from C.
+
+```
+#include <stdint.h>
+
+void* tag_ptr(void* ptr, uint8_t tag) {
+    uintptr_t ptr_p = (uintptr_t)ptr;
+    uintptr_t tag_p = (uintptr_t)tag << 56;
+    uintptr_t mask_p = (uintptr_t)0xff << 56;
+    return (void*)((ptr_p & ~mask_p) | tag_p);
+}
+
+int main() {
+    char buf[5];
+    char* tagged_ptr = (char*)tag_ptr(buf, 0xcd);
+    // Use the pointer as normal, no masking needed.
+    *tagged_ptr = '?';
+    return 0;
+}
+```
+
+[Clang’s MemTagSanitizer](https://llvm.org/docs/MemTagSanitizer.html) uses Memory Tagging to protect stack allocations. This requires QEMU (setup guide for that [here](https://lldb.llvm.org/use/qemu-testing.html)). There are also [intrinsics](https://developer.arm.com/documentation/101028/0013/?lang=en) for MTE and you can fall back to assembly if needed.
+
+Clang compiling for Armv8.3-a can [protect return addresses automatically](https://clang.llvm.org/docs/ClangCommandLineReference.html#cmdoption-clang-mbranch-protection) using Pointer Authentication. There are currently no intrinsics for this so assembly is your only other option. Running the programs requires QEMU or a device such as an M1 Macbook.
+
+Note: Parts of pointer authentication are allocated as hint instructions. This means that if the hardware doesn’t support them they function as nops. Don’t be fooled by that.
+
+# My Program Already Uses Those Bits!
+
+When I talk in this post about non-address bits I’m focusing on the bits defined and handled by the hardware.
+
+Software has been using de facto non-address bits for a long time. Imagine you have an object that you know will be allocated on a 4 byte boundary. That means that the bottom 2 bits of its address will always be 0. That is, unless we decide to put some information there.
+
+You might call these “software defined” non-address bits: we make use of bits we know won’t hold useful information and, as such, can reconstruct them as needed.
+
+Of course if we wanted to then read this object we’d have to remove the bottom 2 bits. That’s the crucial difference between this “software defined” scheme and the hardware defined non-address bits. With the latter, you don’t have to modify the pointer before use (pointer authentication aside, that’s a special case).
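+
+To make the contrast concrete, here is a minimal sketch of such a software defined scheme (the type and helper names are invented purely for illustration):
+
+```
+#include <stdint.h>
+
+typedef struct { int value; } item_t; /* allocated on a 4 byte boundary */
+
+/* Stash a 2 bit "kind" in the low bits we know are always zero. */
+static item_t *set_kind(item_t *ptr, unsigned kind) {
+    return (item_t *)((uintptr_t)ptr | (kind & 0x3));
+}
+
+static unsigned get_kind(item_t *ptr) {
+    return (uintptr_t)ptr & 0x3;
+}
+
+/* The crucial step: strip the bits again before dereferencing. */
+static item_t *real_ptr(item_t *ptr) {
+    return (item_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
+}
+```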
+ +These software defined schemes aren’t prohibited by the hardware having any of the non-address bit extensions. Existing programs will run as before. What can cause issues is when you try to mix the extensions with these ad-hoc schemes without due care. + +Take pointer authentication. I could decide to sign all the return addresses within my language interpreter but keep my custom pointer tagging scheme for pointers to the interpreter’s state. As long as I don’t mix the two, everything is fine. + +Using hardware non-address bits can range from protecting a single pointer to using a whole new [Application Binary interface](https://developer.apple.com/documentation/security/preparing_your_app_to_work_with_pointer_authentication) (ABI), depending on the system’s needs. + +# How Do I Debug This? + +If you’re using any of these extensions, update to LLDB 15 for the best experience (and raise issues for the bad parts!). LLDB will handle all this for you. + +As ever though, just because a tool does the work for you doesn’t mean you shouldn’t have an idea of what that work is. Let’s look at a few examples of non-address bits in action. + +# Memory Access + +We started this post with an example of what is (as you now know) expected to happen on a system with non-address bits. Even if it is surprising at first. However, that was with LLDB 15. + +What do things look like with previous versions of LLDB? For this example I’m [using this source file](https://github.com/llvm/llvm-project/blob/9a976f36615dbe15e76c12b22f711b2e597a8e51/lldb/test/API/linux/aarch64/non_address_bit_memory_access/main.c) and one thing to note is that it uses Top Byte Ignore and Pointer Authentication at the same time. + +This is important because parts of TBI have worked with LLDB going back quite a while. This is mostly because Linux kernel interfaces (so far) accept pointers with those bits set. For pointer authentication, that’s not the case. + +Ordinary running code has to “authenticate” the pointer, which if successful, zeros the signature bits. Therefore LLDB has to do the same before asking the kernel for memory. + +## Memory Access With LLDB 10 + +``` +(lldb) p buf +(char *) $5 = 0x0000fffff7ff9000 "LLDB" +(lldb) p buf_with_non_address +(char *) $6 = 0xff24fffff7ff9000 "" +``` + +We have two pointers with the same virtual address. However the second fails to read what it points to. This is because LLDB 10 isn’t aware that there are non-address bits to remove. + +``` +(lldb) memory read buf buf+8 +0xfffff7ff9000: 4c 4c 44 42 00 00 00 00 LLDB.... +(lldb) memory read buf_with_non_address buf_with_non_address+8 +error: memory read failed for 0xff24fffff7ff9000 +``` + +If we ask for a memory read we find that buf works but buf\_with\_non\_address fails to read at all. If you think about the layers involved here, it’ll all make sense. + +LLDB 10 has sent the whole pointer to ptrace then the kernel has tried to read the memory at that address. The core ignores the top byte, leaving us with 0x24fffff7ff9000. This is greater than the mappable memory range (remember that we have 48 bit virtual addresses) so of course the read fails. + +## Memory Access With LLDB 15 + +``` +(lldb) p buf +(char *) $0 = 0x0000fffff7ff9000 "LLDB" +(lldb) p buf_with_non_address +(char *) $1 = 0xff75fffff7ff9000 "LLDB" +``` + +The second pointer now dereferences correctly because LLDB 15 knows to remove the non-address bits. + +``` +(lldb) memory read buf buf+8 +0xfffff7ff9000: 4c 4c 44 42 00 00 00 00 LLDB.... 
+(lldb) memory read buf_with_non_address buf_with_non_address+8 +0xfffff7ff9000: 4c 4c 44 42 00 00 00 00 LLDB.... +``` + +The two memory reads now give the same result, because they’re reading the same location and LLDB 15 knows this. + +# Memory Regions + +The most obvious change is that with LLDB 15 you can find what memory region a pointer refers to, regardless of the non-address bits. + +``` +(lldb) memory region buf_with_non_address +[0xff24fffff7ff9000-0xffffffffffffffff) – +``` + +Above is what happened prior to LLDB 15. Of course we show you an unmapped region because the pointer is outside the range of mappable memory. + +``` +(lldb) memory region buf_with_non_address +[0x0000fffff7ff9000-0x0000fffff7ffa000) rw- /dev/zero (deleted) +``` + +Now LLDB 15 is ignoring the non-address bits, so we get the correct region. + +The less obvious change is more a side effect than a feature but it’s good to think through nevertheless. + +It’s important to know here that the “memory region” command returns all mapped regions and the gaps between them. So what we see below is the stack, which is mapped, then unmapped space to the end of memory. + +``` +(lldb) memory region --all +<...> +[0x0000fffffffdf000-0x0001000000000000) rw- [stack] +[0x0001000000000000-0xffffffffffffffff) — +``` + +At least that’s what you see when there are no non-address bits. LLDB thinks you’ve got a 64 bit virtual address so of course 0xF…F is the end of mappable memory. + +``` +[0x0000fffff8000000-0x0000fffffffdf000) --- +[0x0000fffffffdf000-0x0001000000000000) rw- [stack] +``` + +Then we do the same with a 48 bit virtual address. See how mappable memory ends with the end of the stack? Makes sense. The end of the stack has bit 48 set as it’s one beyond the range of the 48 bit virtual address. + +This is unlikely to catch anyone out but at least you won’t think your memory has gone missing. Plus, it is more accurate. You never could map above bit 47 so that “unmapped” space was really “unmappable” space this whole time. + +# Memory Read Caching + +Reading memory is expensive. Whether that’s repeated system calls or sending packets to a debug target an ocean away. That’s why LLDB includes a memory cache (unrelated to the CPU’s own caches). + +When the program you’re debugging is halted, LLDB knows that memory can only change if LLDB is the one doing that change. So the result of every memory read is kept until there is reason to think that the memory content has changed. + +There are 2 main ways this happens. First is when there is a write to a specific range of memory. For example you “memory write some\_buf”. LLDB will clear all lines in the memory cache for that range, then send the write on its way to the target. Next time you read “some\_buf”, LLDB will do a new memory read for that range and put the result in the cache. + +The second case is when the program resumes. At this point the program’s code could be (and almost certainly is) modifying memory. LLDB doesn’t make any attempt to track that. Only when the program halts again will we start to fill the (now empty) memory cache. + +How do non-address bits come into this? Without non-address bits we have a 1 to 1 mapping between pointer values and addresses in the cache. + +With non-address bits, we may have multiple pointer values pointing to the same address in the cache as long as their virtual addresses are the same. If the cache doesn’t know this, we’ll waste a lot of time re-reading memory that we already have because it thinks they’re distinct locations. 
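+
+To make that concrete, here is a rough sketch of the reduction a debugger has to apply before it can use a pointer as a location or as a cache key. The 48 bit width is an assumption matching the system in these examples; LLDB's real logic lives in its ABI plugins and also has to deal with kernel (upper range) addresses:
+
+```
+#include <stdint.h>
+
+/* Keep only the virtual address bits, dropping the tag and signature bits
+   above them. */
+static uint64_t strip_non_address_bits(uint64_t ptr, unsigned va_bits) {
+    uint64_t mask = (va_bits >= 64) ? ~(uint64_t)0
+                                    : (((uint64_t)1 << va_bits) - 1);
+    return ptr & mask;
+}
+
+/* strip_non_address_bits(0xa951fffff7ff8000, 48) == 0x0000fffff7ff8000,
+   the address LLDB actually reads from in the earlier examples. */
+```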
+ +LLDB 15’s memory cache has been updated to be non-address bit aware and avoid that issue. + +``` +(lldb) log enable gdb-remote packets +(lldb) p mte_buf +lldb < 21> send packet: $xfffff7ff8000,200#ff +lldb < 516> read packet: <...> +(char *) $3 = 0xa900fffff7ff8000 "" +(lldb) p mte_buf +(char *) $4 = 0xa900fffff7ff8000 "" +(lldb) p mte_buf_alt_tag +(char *) $5 = 0xba00fffff7ff8000 "" +``` + +* LLDB reads mte\_buf (“send packet:”) and caches the result (from “read packet:”). +* Next time it wants to read mte\_buf it gets it from the cache. +* If we use a pointer with different non-address bits, it’s also read from the cache because the virtual address is the same. + +Prior to LLDB 15, this would have resulted in 2 reads for the same location. Which isn’t the end of the world, unless your debug target is on the other end of the world! + +This applies to memory writes also. A write via mte\_buf\_alt\_tag would clear cached content for mte\_buf and mte\_buf\_alt\_tag because they’re pointing to the same virtual address. + +# Signed Function Pointers + +This feature was [contributed by Jason Molenda](https://reviews.llvm.org/D115431). + +``` +(lldb) p fn_ptr +(char (*)(size_t, int)) $0 = 0x003d0000004006ac (actual=0x00000000004006ac a.out`checked_mmap at main.c:13:48) +``` + +We have a signed (pointer authenticated) function pointer, which you see first. Then the virtual address along with the symbol that resolves to. + +Prior to LLDB 15 it would have told you it had no idea what this points to. Hopefully you already knew what it was supposed to be! + +This isn’t an issue if you’re looking up a known function in your binary. It is if you’ve got a function pointer set in a struct somewhere. On top of that your ABI might mandate that all function pointers be signed and you’d hit this all the time! + +# Backtrace + +This was [implemented by Omair Javaid](https://reviews.llvm.org/D99944) back in [LLDB 13](https://releases.llvm.org/13.0.0/docs/ReleaseNotes.html#changes-to-lldb), but it fits the theme so I’ve included it here. + +This is another angle on the previous situation. Remember that you can use pointer authentication to sign all your return addresses? Well a return address is really just a function pointer. + +If we’re in the middle of a function our return address will be on the stack in its signed form. LLDB can’t just use it like that, it has to remove the non-address bits to continue the backtrace. + +``` +(lldb) dis +test.o`b: + 0x400588 <+0>: paciasp + 0x40058c <+4>: stp x29, x30, [sp, #-0x10]! + 0x400590 <+8>: mov x29, sp + 0x400594 <+12>: bl 0x40057c ; c at test.c:1:14 + 0x400598 <+16>: nop +-> 0x40059c <+20>: ldp x29, x30, [sp], #0x10 + 0x4005a0 <+24>: retaa +``` + +Above you can see that when we entered “b()” we used the instruction “paciasp”. This signs the return address (in x30) with a modifier, which is the stack pointer’s value. Then the two registers are stored on the stack. We’re about to reload those registers then authenticate the return address with “retaa” (return and authenticate). 
+
+```
+(lldb) bt
+* thread #1, name = 'test.o', stop reason = instruction step over
+  * frame #0: 0x000000000040059c test.o`b at test.c:2:21
+    frame #1: 0x00000000004005b4 test.o`a at test.c:3:16
+    frame #2: 0x00000000004005d0 test.o`main at test.c:4:14
+    frame #3: 0x0000fffff7e7f090 libc.so.6`__libc_start_main + 232
+    frame #4: 0x000000000040049c test.o`_start at start.S:92
+```
+
+Backtrace looks just like normal thanks to LLDB removing the non-address bits from the stored return address (“unsigning” it if you will, but not actually authenticating it).
+
+```
+(lldb) memory read $sp $sp+16 -s8 -f uint64_t
+0xfffffffff310: {0x0000fffffffff320}
+0xfffffffff318: {0x006c0000004005b4}
+```
+
+If we read the stack contents manually we can see first the frame pointer then the signed return address (that “6c” in the upper bits). Without LLDB removing the signature we have no hope of backtracing from here.
+
+# Extension Specific Features
+
+There are situations where you do want to see a subset of the non-address bits. For example, if you’re debugging the use of a non-address bit extension like Memory Tagging. For extension specific features, LLDB will show the relevant bits.
+
+```
+(lldb) memory tag read mte_buf_alt_tag
+Logical tag: 0xa
+Allocation tags:
+[0xfffff7ff8000, 0xfffff7ff8010): 0x0 (mismatch)
+```
+
+The logical tag is part of the pointer, so this has been shown separately and the addresses shown later do not include any non-address bits. Non-address bits (like the logical tag) are a property of a pointer, not of the memory it points to. Non-address bits aside from the logical tag are ignored because they don’t relate to MTE.
+
+```
+(lldb) c
+Process 175530 resuming
+Process 175530 stopped
+* thread #1, name = 'prog', stop reason = signal SIGSEGV: sync tag check fault (fault address: 0x200fffff7ff9000 logical tag: 0x2 allocation tag: 0x1)
+    frame #0: 0x0000fffff7ee3e94 libc.so.6`___lldb_unnamed_symbol2690 + 84
+libc.so.6`___lldb_unnamed_symbol2690:
+->  0xfffff7ee3e94 <+84>: str q0, [x0]
+```
+
+In some cases we decided to show the raw pointer value with annotations. In the case above we know the fault is due to memory tagging. However, given how many segfaults happen because of corrupted pointers, LLDB shows the full value of the pointer as well as the memory tagging specific information.
+
+# Corrupted Pointer or Non-Address Bits?
+
+These examples are all well and good but we have situations where these non-address bits get set by mistake. You get your pointer arithmetic wrong, you copy the wrong data type over a function pointer, etc.
+
+This makes for a frustrating situation for developers. What if I corrupted a pointer with no non-address bits used, to look like one that did use non-address bits? Can I tell what happened?
+
+Not right away. Since the debugger is going to remove non-address bits for you, a lot could slip by until the program fails at runtime. For example, what if you accidentally set the memory tag on a pointer? How can you tell it’s accidental? This is where knowing how these LLDB features work helps you.
+
+“memory read” is going to work regardless. LLDB doesn’t need a correct tag to read memory. What you could do is run “memory tag read”. This would either tell you that the memory was untagged or that the tags didn’t match (or if you’re really unlucky you’ll hit the 1/16 chance where they do).
+
+From there it’s up to you, the developer, to know the context. Were you expecting this memory to be tagged, were you expecting this pointer to be tagged?
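+
+As a contrived sketch of how such an accident can happen (the helper and flag are invented for illustration), a shift that was meant to set a low "in use" bit can land in the byte MTE uses for its logical tag:
+
+```
+#include <stdint.h>
+
+#define IN_USE 1ULL
+
+static void *mark_in_use(void *ptr) {
+    /* Intended: (uintptr_t)ptr | IN_USE. The stray << 56 puts the bit in
+       the top byte instead. Plain loads and stores still work under Top
+       Byte Ignore, so nothing fails until the tag is actually checked. */
+    return (void *)((uintptr_t)ptr | (IN_USE << 56));
+}
+```
+
+A “memory tag read” on the result would then report either untagged memory or a tag mismatch, which is the hint that the bits were set by accident rather than by the allocator.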
+ +# In Conclusion + +If you’re using any non-address bit features you should be using LLDB 15. It has many improvements to the debug experience and I hope this post has given you the knowledge to make the most of it. + +LLDB’s handling of non-address bits is based on early development with these features. In some cases only on virtual platforms like QEMU. + +That isn’t going to perfectly match the developer experience as physical hardware becomes available. So if you find something that doesn’t make sense to you, please raise [an issue](https://github.com/llvm/llvm-project/issues) or email linaro-toolchain@lists.linaro.org. All feedback is welcome! + +And for more information on Linaro's Arm LLVM Toolchain Enablement & CI project, click [here](https://linaro.atlassian.net/wiki/spaces/LLVM/overview). diff --git a/src/content/blogs/lvc20-wrap-up.mdx b/src/content/blogs/lvc20-wrap-up.mdx new file mode 100644 index 0000000..305da92 --- /dev/null +++ b/src/content/blogs/lvc20-wrap-up.mdx @@ -0,0 +1,196 @@ +--- +title: LVC20 Wrap Up +description: Our latest Connect meeting in September was not quite what we had + in mind at the beginning of the year. Yet, in these unusual times we have been + able to host a very successful Linaro Virtual Connect (LVC20). Over 65 + sessions were delivered. With presenters from around the world and from world + class leading businesses, we were able to preserve everything you expect from + Linaro Connect. This blog is a summary of some of the highlights from the + event. Enjoy! +date: 2020-10-07T10:04:33.000Z +image: linaro-website/images/blog/connect-2020-virtual-1- +tags: [] +author: jon-burcham +related: [] + +--- + +## Linaro Virtual Connect 2020 + +![class=small-inline left Linaro Virtual Connect logo](/linaro-website/images/blog/connect-2020-virtual-1-) + +In early 2020 we were looking forward to our twice annual Linaro Connect event and to welcoming the 400+ Linaro employees, assignees, members, partners and others in the open source community. Well, as you all know things in 2020 haven’t quite gone as planned but we are happy to be able to host Linaro Virtual Connect as a way to preserve some of the things we love most about Linaro Connect. + +The shift to a virtual event has been a learning experience for everyone - events team, speakers, and attendees and we thank you for your patience and willingness to try something new. One benefit of a virtual event is that attendees and speakers who have not been able to attend Linaro Connect in the past have had the chance to participate. + +We are very happy to have so many of you invested in Linaro Connect and pleased that we are still able to deliver the important technical content. + +With all the changes 2020 has brought, Linaro Connect continued to the tradition of quality sessions at the forefront of Arm ecosystem development. The three days were staggered at different start times to accommodate as many time zones as feasible. With a large percentage of sessions prerecorded, we were able to feature speakers from around the world. + +There were an astounding 1,476 individual registrations, three times the in person Connect average. Live conversations started during the sessions and continued in the Slack channels afterwards. For a change of pace, Kassidy Holmes led participants through a 1 hour foundational yoga flow on Wednesday. On Thursday, Martin Jackson wrapped up the week with a lively acoustic set. + +The most popular sessions (by registration counts) were. 
+ +* [Arm64 Linux Kernel Architecture Update](https://lvc20.sched.com/event-goers/adfde6a151331482f8037ffcb2440e56) +* [Trusted Firmware Project Update](https://lvc20.sched.com/event-goers/06e61cea5736945a61870dbdae81512a) +* [Arm Architecture 2020 Extensions](https://lvc20.sched.com/event-goers/fcda9a5f34400338debca060c1dd2032) +* [PSA Secure Partitions in OP-TEE](https://lvc20.sched.com/event-goers/12ccff0741601ae997bf4057c56ccec3) +* [Enable UEFI Secure Boot Using OP-TEE as a Secure Partition](https://lvc20.sched.com/event-goers/ee38e4c30840e698eb7d34578f1d5633) + +## Tuesday September 22 Highlights + +[LVC20-100K1 Opening Keynote](https://resources.linaro.org/en/resource/cCMFrV55UD3TydmKVGLkAK) + +Li Gong + +Li noted that Linaro ranked #5 in patches committed in the Linux kernel between 2007-2019, even though Linaro is only 10 years old. Li noted that new companies are starting up at a brisk pace. Companies are making their own chips, carrying differentiation from the system level to the chip level. The cost of chip design has significantly declined making custom chip design more affordable. This is a way to secure access to IP and supplies particularly in the face of on-going shortages and trade wars (see China/US). This of course has a direct impact on SoC vendors. In Li’s view, the Arm ecosystem is composed not only of hardware vendors but now major software vendors are playing a role. Linaro is enlarging its scope for these new companies and the software vendors with the idea of a franchise company. A franchise, in this context, is a sub ecosystem within the Arm ecosystem. An example is Google and Android. Linaro also has simplified its membership model to four tiers - Core, Club, Group and Project, while continuing to focus on delivering value through its corps of maintainers and skilled developers. Finally, Linaro is opening up its projects and processing to a worldwide audience. + +[LVC20-100K2 Why Standardisation on the Edge is Critical for Success ](https://resources.linaro.org/en/resource/PTTosUJCmmdpRj3aCCWRUV) + +Peter Robinson + +While “Edge” means many things to many people, in order for Edge solutions to be a success, the key is using the Open Standards Data Center model. Edge platforms face several challenges including environmental, scale and cost. Which will vary depending on which Edge “tier” that platform plays in. Open standards, as with the IBM PC and various networking standards (TCP, HTTP), lower barriers to entry and protect investments in time, money and knowledge. Enterprise standards are useful. In addition to knowledge reuse, you are not locked into a single platform. You can use the right hardware with the same base software stack using the same or similar security models and processes. Provisioning and on-boarding end devices need to be deployed with general knowledge at large scale (10 of thousands) across geographic regions. Using OCI container solutions provides consistency and scalability. Standardisation in edge computing giving manufacturers and consumers more, not less, choice. + +[LVC20-103 Compliance Testing For Edge and IoT](https://resources.linaro.org/en/resource/Ug6ezaDqtJXbxfxJTDK65r) + +Grant Likely + +As usual Grant gave a practical talk on direction and progress in the complex space of edge and IoT device booting. 
Platforms are only created when interfaces are stable and reliable - there is less work per platform enablement.
+
+* Stage 0: Enable UEFI on U-Boot
+* Stage 1: Run UEFI Self Certification test (SCT)
+* Stage 2: Interpret SCT results; currently lacking a parsing tool to interpret results
+* Stage 3: Boot an unmodified OS to a functional state; Device Tree is critical here but support is inconsistent
+* Stage 4: Test multiple deployment scenarios; work required here; can use some tools from the Data Center world.
+
+[LVC20-113 Trusted Firmware Project Update](https://resources.linaro.org/en/resource/n68pJGdpXDBxkQwTj83vwk)
+
+Matteo Carlini and Shebu Varghese Kuriakose
+
+The Trusted Firmware (TF) project’s mission is to collaboratively build a secure reference software implementation for Arm processors. Highlights from the past year include the addition of Mbed TLS (donated by Arm) and Hafnium (donated by Google) to TF. TF for Cortex-A (TF-A) v2.3 and TF for Cortex-M (TF-M) v1.1 were both released. Renesas and NXP joined as members. Also a new Security Center was set up to provide consistency on handling security vulnerabilities and incidents. A new maintainer process was put in place including how code reviews and the patch lifecycle are managed. Half of the TF maintainers are now from outside of Arm. The project also announced that Don Harbin (Linaro) will be the TF Community manager and that the [TF website](https://www.trustedfirmware.org/) has been significantly updated. Looking ahead, TF is expanding CI/Testing efforts with more platforms, static analysis and updated user guides. The community project will also be sponsoring workshops on TF-M and Mbed TLS.
+
+A don’t-miss session is [LVC20-104 On the Edge of the Real World. An Introduction](https://resources.linaro.org/en/resource/8nB5VYQHPqWWi6L7irAero) by Bruno Verachten. A lively talk from Bruno on how to build home IoT systems. The bottom line? There’s a lot of choice!
+
+[LVC20-117 Everything You Want to Know About Live Migration on Arm64 Cloud](https://lvc20.sched.com/event/dU6x/lvc20-117-everything-you-want-to-know-about-live-migration-on-arm64-cloud)
+
+Zhenyu Zheng and Kevin Zhao
+
+Currently, one big gap between Arm64 and X86 cloud platforms is that X86 can provide a much better instance migration experience than the Arm64 platform. CPU comparison and CPU model capabilities have provided Arm64 VMs with the ability to live migrate among different hardware vendors. This is an essential function for the data center. From the cloud management framework side, we also need to consider how to support VM live migration. In this session, we discussed what we have done in the most widely used virtualization management tool - Libvirt - to provide better live migration capabilities on the Arm64 platform, and also some details in the newest lightweight cloud management projects such as Kubevirt. With live migration support on Arm64, this can finally benefit the cloud ecosystem for large scale datacenter scenarios which may use different Arm64 CPU architectures and vendors. In the process of this work the team discovered missing or non-functional code for Arm64. It is possible these issues exist in other code areas.
+ +*** + +## Wednesday September 23 Highlights + +[LVC20-200K1 Keynote Part 1: EPI The European Approach for Exascale Ages: The Road Toward Sovereignty](https://lvc20.sched.com/event/eVtb/lvc20-200k1-keynote-part-1-epi-the-european-approach-for-exascale-ages-the-road-toward-sovereignty) + +Jean Marc Denis + +The goal of the European Processor Initiative (EPI) is to become one of the leaders in High Performance Computing (HPC). EPI is a European backed initiative to develop a complete European designed high end microprocessor addressing the Super Computer and edge-HPC segments. This week, the EU committed €8 billion for this effort. Europe wants sovereign access to high performance, low power microprocessors from initial IP to delivered products and reduce the dependence on non-EU suppliers. SiPearl is the industrial partner of EPI operating as a commercial entity to benefit the EPI project and its members. The plan is to have a unified environment based on Arm Zeus cores by 2023. Different classes of accelerators will be used as part of the workflow engines. The main strength of Arm is IP going from device to the super computer. The presenter's view is that the NVidia/Arm alliance is a unique, fantastic combination of AI and GPU capabilities. + +[LVC20-200K2 Keynote Part 2: Developing Rhea the SiPearl European High Performance Processor](https://lvc20.sched.com/event/eVtL/lvc20-200k2-keynote-part-2-developing-rhea-the-sipearl-european-high-performance-processor) + +Craig Prunty + +SiPearl's first target market for Rhea chips is HPC. Then in order Cloud, Edge (which is similar to Cloud requirements) then Automotive/Industrial Edge. Arm is attractive as it has a fully fledged ecosystem. Good hardware needs good software. The Arm ecosystem is self-sustaining. “Arm wins because it has an ecosystem”. Rhea is a hyperscale processor using the Arm Zeus core with coherent on chip network; There is a need to scale from chip to HPC to Edge applications. It is important to include high bandwidth memory with low power, low latency links using state of the art design and architecture. The plan is to make the EPI Common platform open standard in the future. Rhea is intended to be a General Purpose Processor balanced between performance and memory bandwidth. You will also need accelerators to provide optimization. + +[LVC20-200K Keynote: Respect! (R-E-S-P-E-C-T)](https://lvc20.sched.com/event/dU7C/lvc20-200k-keynote-respect-r-e-s-p-e-c-t) + +Carlo Piana + +Carlo muses on the cultural basis for license compliance in the Open Source community. The pillar is respect, not fear of litigation or fines. Enforcement does not happen that often and companies are getting better at compliance. Carlo posits that compliance comes from social norms - the right thing to do. If rules are simple and straightforward as well as reasonable and self-evident, compliance will be nearly uniform. The standard Open Source licenses are reasonable and straightforward so it is easy to comply. In order to receive respect, you must give respect. People can pledge to a higher standard using such as Open Chain and SPDX in headers. One way to show respect is to take a bit of effort to ensure users of your code can easily find the licensing and restrictions in your code. 
+ +[LVC20-206 Journey of EBBR Compliance and NXP Devices](https://resources.linaro.org/en/resource/TLFojavzLY83iU72Ba9Ryi) + +Poonam Aggrwal, Priyanka Jain and Ilias Apalodimas + +The Embedded Base Boot Requirements (EBBR) specification defines requirements for embedded systems to enable interoperability between SoCs, hardware platforms, firmware implementations, and operating system distributions. EBBR is targeted at making operating system/distros agnostic to the platform. Same operating system image should run on any hardware with a well-defined firmware interface which is EBBR compliant. + +There has been significant work going on in U-boot with regards to EBBR in the open-source community. Various features like bootefi are already available with many other features in queue. + +This presentation aims at explaining how EBBR specifications gets mapped to NXP platforms and demonstrating EBBR compliance for NXP platforms. The reference boot-architecture will be based on TFA, u-boot, device-trees, Linux and OPTEE (for secure uefi flow). This will demonstrate distros like SUSE running on NXP SoCs using bootefi command, secure uefi flow, etc + +Also efforts are going to ensure that the U-boot is EBBR compliant by running FWTS, SCT for EBBR. The idea is to make the u-boot feature complete and can be demonstrated as EBBR compliance on NXP devices. + +[LVC20-219 Arm Virtualization Fireside Chat and Project Stratos](https://resources.linaro.org/en/resource/GpD5xjtuc52Uwp62vMnfCF) + +Mike Holmes, Azzedine Touzni and Alex Bennée + +Virtualization fireside chat moderated by Mike Holmes Director of Engineering Foundational Technologies at Linaro featuring guests: Azzedine Touzni Sr. Director of Engineering at Qualcomm, Alex Bennée Sr. Software engineer at Linaro. + +This fireside chat covered topics such as the current challenges in the virtualization domain, the state of virtualization on Arm, the end vision for Stratos, how to get involved, and more… + +[LVC20-211 Next Evolutions for Linux Scheduler](https://lvc20.sched.com/event/dU7j/lvc20-211-next-evolutions-for-linux-scheduler?iframe=no) + +Vincent Guittot + +The scheduler has been the place of a lot of changes during the past releases with new interfaces to set properties of tasks and/or groups of tasks; Other evolutions are ongoing and this session will go through the main changes merged during the past releases and the ongoing discussions for next changes. + +## Thursday September 24 Highlights + +[LVC20-300K Let's Butcher Software Development Analytics](https://lvc20.sched.com/event/dU87/lvc20-300k-lets-butcher-software-development-analytics-together-so-you-dont-have-to-when-it-really-counts) + +Jose Manrique López de la Fuente + +Jose started quoting William Edwards Deming “Without data, you are just a person with an opinion”. So what to do with data - “Strategy without tactics is the slowest route to victory. Tactics without strategy is the noise before defeat.” - Sun Tzu + +Data must be matched with context knowledge. Looking at the Linaro Use Case, there is a huge amount of contributors affiliated to “Unknown” organizations. Why? Is data wrong? + +Jose notes that every company is becoming a software company. And how a company interacts with the software ecosystem can help or hurt them. Success is understanding the (1) Legal aspects (2) People and (3) Engineering and the give and take between them. Jose recommends that you check out howtogroup.org and chaoss.community for wisdom. 
The best approach is to combine strategy, tactics and context knowledge to approach open source and generated analytics. + +[LVC20-301 In Conversation with Todd Kjos and GKI V2](https://lvc20.sched.com/event/dU8A/lvc20-301-in-conversation-with-todd-kjos-gki-v2) + +John Stultz & Todd Kjos + +This session was an Interview (by John Stultz) with Todd Kjos, Google, about the reason for, and challenges with, Generic Kernel Image (GKI) project. Fragmentation is in the way of keeping Android devices up-to-date with latest kernel patches (bugs,security, h/w, etc) - based on LTS kernel. Google is keeping Pixel devices up-to-date but few others are doing the same. A module kernel approach to allow Google to keep the kernel up-to-date and allow vendors to maintain their customizations. Tools keep track of ABI stable symbols (KMI - Kernel Module Interface - subset of the full kernel ABI). Google strongly recommends vendors to send their changes into the upstream kernel. Benefit of common use and less fragmentation (when differentiation isn’t actually required but has happened in the past). Therefore helping vendors to eliminate unnecessary technical debt. + +[LVC20-306 OpenAMP Community Project Update](https://lvc20.sched.com/event/dU8P/lvc20-306-openamp-community-project-update) + +Tomas Evenson & Nathalie Chan King Choy + +The OpenAMP Linaro Community Project is focusing on standardizing aspects of embedded heterogeneous software through open source projects. Current SoCs are very heterogeneous. In the past, integrations of the various computing components has been ad-hoc. The goal of OpenAMP is to standardize the interactions between the components. Project has a Board, a Technical Steering Committee and working groups ( System Device Tree, Remoteproc / RPmsag, Virtio and libmetal) + +[LVC20-On the New IO Benchmarking Framework Being integrated in LKFT](https://lvc20.sched.com/event/dU8h/lvc20-312-on-the-new-io-benchmarking-framework-being-integrated-in-lkft) + +Anders Roxell and Paolo Valente + +LKFT has been endowed with I/O benchmarks. They measure both I/O throughput with general workloads, and system-and application-level latency under heavy loads. More benchmarks are being evaluated as well. In addition, the current I/O benchmarks are being used as a pilot case, to make a general solution for automatic detection of performance regressions. In this presentation we will describe these interesting developments. + +[LVC20-310 Testing IoT Devices -Design and Progress from LITE Team](https://lvc20.sched.com/event/dU8b/lvc20-310-testing-iot-devices-design-and-progress-from-lite-team) + +Kumar Gala & Paul Sokolovsky + +Testing MCU’s (Cortex-M) systems presents challenges different from Cortex-A/Linux systems. Using LAVA infrastructure and tools, the team has worked with the LAVA and LAB teams to evolve the apps and tools to support continuously running tests in a CI context. A key piece was to utilize containers to provide consistency and the ability to run in multiple environments. Kumar Gala walked through a short demo on what this looks like a developer system. Paul Sokolovsky spoke about using this setup for network testing with the result of finding serious regressions prior to the October release of Zephyr. + +[LVC20-315 Using Rust in MCUboot](https://lvc20.sched.com/event/dU8q/lvc20-315-using-rust-in-mcuboot) + +David Brown + +It seems the Rust programming language comes up frequently these days, and there is a lot of interest in it. 
We have been using Rust to implement a simulation test environment within MCUboot since 2017. This presentation will discuss our experience with Rust, and our hopes of how this language might help other projects in the future, especially in regards to security. + +[LVC20-303 State of Big Data and Data Science on Arm](https://lvc20.sched.com/event/dU8M/lvc20-303-state-of-big-data-and-data-science-on-arm?iframe=no) + +Ganesh Raju + +Big Data is a huge ecosystem of a large number of applications and companies. Linaro’s goal is to ensure that Arm platforms are on par or better with other architectures in as many areas as practical. As part of this effort, the team is benchmarking against x86 and optimizing to leverage AArch64 advantages. Achieving this influence has taken time and effort to encourage projects and applications to consider the Arm server platform. The focus has been on Apache Big Top as it is a foundation for many other applications and products. LDCG has been a leading contributor. The next release of Big Top is v1.5 in a few weeks. The team is also looking at Apache Ambari - a management and security application. + +[LVC20-317 Analysis of Arm64's Competence for Oil/Gas Seismic Data Processing Applications](https://lvc20.sched.com/event/dU8G/lvc20-317-analysis-of-arm64s-competence-for-oilgas-seismic-data-processing-applications?) + +Jinshui Liu + +This presentation presented a real world challenge - how to analyze the results from Full Wave Inversion (FWI) testing. This is used in oil and gas exploration, active drilling and reservoir prediction. This is a convergence of HPC and AI as the proposal is to use Deep Learning to build models. The speaker notes that this work is highly parallel processing. As an example, one company put together 250 Petaflops with 40,000 Intel Xeon Phi 7250 (68 cores per socket) to tackle this problem. To date, the high computing cost has discouraged work in this area. The speaker is proposing an academic- Industry alliance to tackle this problem through the HPC-AI project. + +*** + +With distinguished presenters from around the world and from some world class leading businesses, our Virtual Connect delivered 65 sessions. This content covers some of the groundbreaking topics of the moment and will be added to our already vast amount of resources that has been compiled over the last 10 years ago, when Linaro was first founded. + +Although the current climate has curbed our celebrations for our anniversary, it has not curbed our achievements and we are proud of our successes in working together with our members and the community alike. + +All videos and slides from presentations are now available on our [Resource page](https://resources.linaro.org/en/tags/3599c8da-2a90-4dc9-964f-d69a5cf15379). + +We look forward to our next in person event when we can enjoy some of our favorite aspects of Linaro Connect- team hacking time, Joe Bates’ morning fun facts, the “Ask Arm Anything” session, socializing after hours with colleagues and friends, Demo Friday, and everyone’s favorite: Dave Pigott’s puzzle. + +Thank you for attending Linaro Virtual Connect 2020. + +Until next time! 
+
+Authors: Vicky Janicki, Kristine Dill, Jonathan Burcham, Mark Orvek and David Rusling
diff --git a/src/content/blogs/many-uses-of-qemu.mdx b/src/content/blogs/many-uses-of-qemu.mdx
new file mode 100644
index 0000000..55cb7a0
--- /dev/null
+++ b/src/content/blogs/many-uses-of-qemu.mdx
@@ -0,0 +1,72 @@
+---
+title: The many and varied uses of QEMU
+description: In this article, Alex Bennée provides an overview of some of the
+  engineering enabled by QEMU throughout Linaro and beyond. Read more here!
+date: 2021-05-18T08:00:00.000Z
+image: linaro-website/images/blog/code-background_1
+tags:
+  - qemu
+  - iot-embedded
+  - open-source
+  - arm
+  - linux-kernel
+author: alex-bennee
+related: []
+
+---
+
+# Introduction
+
+QEMU is a versatile virtual machine monitor (VMM) and emulator. While there are other options for handling hardware virtualisation, its pedigree as an emulator is almost unique in the open source world. Very few come close to emulating such a broad range of architectures and general purpose hardware on such a wide range of host systems.
+
+There has been a QEMU team inside Linaro since our creation over 10 years ago. The team has been heavily involved in the upstream maintenance of the project and over that time we have been the 3rd biggest contributor to the code. The core team mostly concentrate on [enabling new architectural features](https://projects.linaro.org/browse/QEMU-241). The development pipeline for working silicon is so long that FLOSS developers don't want to wait until actual hardware ships to be able to test their new features. A good recent example was the work to implement [Scalable Vector Extensions (SVE)](https://www.linaro.org/blog/sve-in-qemu-linux-user/). This has been available in QEMU since 3.0 but unless you are lucky enough to get access to an [A64FX](https://www.fujitsu.com/global/products/computing/servers/supercomputer/a64fx/) system, real hardware is still out of the reach of most FLOSS developers. More recent examples include the [Memory Tagging Extension (MTE)](https://wiki.qemu.org/ChangeLog/5.1#Arm) and [Branch Target Identification (BTI)](https://wiki.qemu.org/ChangeLog/5.2#Arm) features which allow the processor to assist in validating memory and pointers and are key components for compiler "hardening" efforts when generating secure code.
+
+While the core team is quite small it doesn't mean that work is not done on QEMU elsewhere in Linaro. As a flexible software model QEMU is ideally suited to enabling low level development and testing of a variety of use cases.
+
+# IoT development with M-profile
+
+Most people think of Arm processors as low power devices due to their usage in mobile phones. While it's true Arm cores give a lot of performance per-watt, the chips that mobile phones use are general purpose [A-profile](https://developer.arm.com/architectures/cpu-architecture) cores designed for running so called "high-level" operating systems like Linux. The M-profile is aimed at situations
+where efficiency and power consumption are even more important. While they share the same core [Instruction Set Architecture (ISA)](https://developer.arm.com/architectures/instruction-sets/base-isas), they have a different set of system features.
For example, they trade a full-featured [Memory Management Unit (MMU)](https://en.wikipedia.org/wiki/Memory_management_unit) needed by a multi-user system like Linux for an optional [Memory Protection Unit (MPU)](https://developer.arm.com/documentation/ddi0337/h/memory-protection-unit/about-the-mpu) better suited to tightly integrated use-cases. Support for the "internet of things" relies on these capable but low power processor cores.
+
+Developing for such devices can often involve having special versions of the target board which expose debug headers to the developer's system. These need to be connected with specialised JTAG headers to access the debug pins which can be a bit fiddly if you are just getting started with microcontroller development.
+
+Emulation will never be a complete replacement for running code on the final target, however it is very useful for experimentation. QEMU does provide a variety of M-profile targets which can be debugged with standard tools like **gdb**. Over the last few years we've added a range of [Arm MPS boards](https://qemu.readthedocs.io/en/latest/system/arm/mps2.html), modern embedded development boards which are ideal for testing the latest features like TrustZone. As a software model QEMU also makes it easier to integrate tests into Continuous Integration (CI) loops that can be used to check new contributions to the code base.
+
+One of the items we are working on this cycle is [bringing in support for the M-profile vector extensions](https://projects.linaro.org/browse/QEMU-406). While you may think of large server rooms of dedicated hardware training machine learning models, those resulting models are often run on very resource constrained targets. Here [QEMU can provide](https://projects.linaro.org/browse/AI-57) a useful target for running such models on IoT devices.
+
+# Common reference platforms
+
+[The Open Portable Trusted Execution Environment (OPTEE)](https://www.op-tee.org/) is a key part of providing the trusted execution environment on Arm. It operates at the lowest level of the software stack and has to have intimate knowledge of the hardware it's running on. Generally referred to as firmware, it is usually built by the OEM who designed the hardware. However when developing generic features it helps to have a standard platform which all developers can have access to. Here QEMU offers an [excellent target](https://optee.readthedocs.io/en/latest/building/devices/qemu.html) which supports secure world peripherals that only the
+firmware can access.
+
+In fact having a reference platform is useful in all sorts of cases. The [Server Base System Architecture (SBSA)](https://developer.arm.com/documentation/den0029/latest) standard is a big part of the drive to "make servers boring". It provides enough standardisation for OS manufacturers so they can target the architecture without building special installers for a myriad of platforms. The aim is you can just insert a single build of the install media and expect it to work. To help test the firmware that supports this we have the [sbsa-ref](https://qemu.readthedocs.io/en/latest/system/arm/sbsa.html) board. It is very much a fixed platform, so much so that it's quite hard to directly boot a generic kernel on it. This is because a kernel is expecting all the details to be provided by an SBSA compliant firmware which will "know" the details of the hardware by virtue of having it baked in during the build process.
+ +The standard is an evolving one and later levels also require a minimum base architecture spec. It's for this reason we have a [bunch of work](https://projects.linaro.org/browse/QEMU-418) this cycle to provide newer CPU and GIC models to support work on the later levels of the specification. If an operating system can boot on a reference SBSA platform like QEMU then you can be fairly +certain it will boot on any other similarly compliant piece of real hardware. + +# Testing in the Cloud + +Linaro's [Linux Kernel Functional Test (LKFT)](https://lkft.linaro.org/about/) team are focused on doing functional regression testing on a number of public kernel trees. This augments other kernel CI activities across the community but obviously with a very Arm flavoured focus. After building with [TuxSuite](https://tuxsuite.com/) tests are run on a number of platforms. However while building in the cloud allows for easy scaling of capacity the testing still requires physical hardware which requires maintaining racks of machines which are considerably harder to scale up on demand. We are still some way off having ubiquitous nested Arm virtualisation in the cloud but we can certainly easily spin up x86 machines and run QEMU on them to emulate various bits of Arm hardware. + +Of course while we are focused on improving the Arm experience we still have to ensure patches don't break the kernel's support for other architectures. Again QEMU's support for a wide range of guest architectures means we can test against software models without having to scale up a lab with lots of non-Arm hardware. + +# Stratos playground + +Project Stratos is a company wide effort to expand the usage of virtualisation on Arm platforms. The work encompasses both hypervisors and guests with a strong focus on using [VirtIO](https://www.linaro.org/blog/virtio-work/) for standardised device emulation and resource sharing. We are focused on testing against open source hypervisors like KVM and [Xen](https://xenproject.org/) as well as looking at up-coming secure hypervisors such as [Hafnium](https://review.trustedfirmware.org/plugins/gitiles/hafnium/hafnium/+/HEAD/docs/Architecture.md). Getting a hypervisor up and running requires a fairly tight integration between the firmware, boot loader and the host kernel. While most **sbsa** boards will happily boot +KVM on mainline distributions, results can vary for other solutions. If you are working on experimental hypervisor code you might not want to brick your main development machine to get an environment up and running. + +To avoid being blocked on hardware bring up you can utilise QEMU's **virt** platform and boot directly into a hypervisor while setting up[ a guest with the appropriate metadata](https://qemu.readthedocs.io/en/latest/system/guest-loader.html). QEMU's ISA emulation includes v8.1's[ Virtualization Host Extensions](https://lwn.net/Articles/650524/) which enable more efficient type-2 hypervisors as well as the recently merged [secure EL2 support](https://gitlab.com/qemu-project/qemu/-/commit/48202c712412c803ddb56365c7bca322aa4e7506) which is part of the recently [released QEMU 6.0](https://www.qemu.org/2021/04/30/qemu-6-0-0/). + +# Introspection, Introspection, Introspection + +There are advantages to running inside QEMU beyond having a nice sand boxed test environment. While attaching **gdb** to a real debug port can give a pretty good experience (assuming you have the headers required) it's hard to match the level of introspection possible via QEMU. 
For example QEMU exposes the entire [range of system co-processor registers](https://developer.arm.com/documentation/ddi0595/2021-03?lang=en) to the gdb stub. + +Anyone who has ever used the excellent [rr](https://rr-project.org/) to debug user space programs on Linux will know how useful it is to rewind time after your application has failed. While still relatively new to QEMU the ability to leverage [deterministic replay](https://wiki.qemu.org/Features/record-replay) to enable reverse debugging also promises to be a useful tool. + +Finally there is also the recently introduced [TCG plugins](https://qemu.readthedocs.io/en/latest/devel/tcg-plugins.html) feature which allows for some interesting experiments to be written to analyse code behaviour. The tool chain teams already take advantage of the ability to count executed instructions to measure the real world differences to changes in code generation. It's easy to imagine creating experiments to measure things like general cache residency or instructions spent executing in the kernel to service a user request, giving further insight to how code actually behaves when it is run. + +# Outro + +I hope this has been a useful overview of the many ways we use QEMU here in Linaro. All our work on QEMU is done in the open on the upstream development lists. If you want to play with some of the latest features please visit the [project website](https://www.qemu.org/) where you can find instructions for downloading and building from the source code. We hope to see you there ;-) + +For more information about Linaro and the work we do, do not hesitate to [contact us](https://www.linaro.org/contact/). diff --git a/src/content/blogs/mcuboot-becomes-a-linaro-community-project.mdx b/src/content/blogs/mcuboot-becomes-a-linaro-community-project.mdx new file mode 100644 index 0000000..a5b665b --- /dev/null +++ b/src/content/blogs/mcuboot-becomes-a-linaro-community-project.mdx @@ -0,0 +1,21 @@ +--- +title: MCUboot becomes a Linaro Community Project +description: > + MCUboot Project joined the Linaro Community Projects Division, the division of + Linaro managing open source community projects. Read about MCUboot here. +date: 2021-06-17T08:00:00.000Z +image: linaro-website/images/blog/cyber-security +tags: + - open-source + - security + - linux-kernel +author: linaro +related: [] + +--- + +Today the [MCUboot Project joined the Linaro Community Projects Division](https://www.mcuboot.com/news/blog/mcuboot-project-joins-linaro-community-projects-division/), the division of Linaro managing open source community projects, including Trusted Firmware and OpenAMP. MCUboot is a secure bootloader for 32-bit MCUs. + +The MCUboot project aims to define a common infrastructure for the bootloader and system flash layout on microcontroller systems to ensure secure and easy software upgrades. Operating system and hardware independent, MCUboot currently works with Apache Mynewt, Zephyr, and Mbed OS operating systems, as well as with the Trusted Firmware-M secure environment. The project plans to support upcoming standards such as Software Updates for IoT (SUIT), the recent NIST Cybersecurity for IoT Guidance drafts (NISTIR 8259D) and the FIDO Device Onboarding (DO) standard. The project also plans to continue support for new devices and new technologies in those devices. + +The founding members of this project are Arm, Infineon, [Linaro](https://www.linaro.org/contact/), Nordic Semiconductor and STMicroelectronics. 
To find out more about the project and how to join or participate, go to [www.mcuboot.com](https://www.mcuboot.com/). diff --git a/src/content/blogs/mhi-bus-for-endpoint-devices-upstreamed-to-linux-kernel.mdx b/src/content/blogs/mhi-bus-for-endpoint-devices-upstreamed-to-linux-kernel.mdx new file mode 100644 index 0000000..5353777 --- /dev/null +++ b/src/content/blogs/mhi-bus-for-endpoint-devices-upstreamed-to-linux-kernel.mdx @@ -0,0 +1,132 @@ +--- +title: MHI bus for Endpoint devices upstreamed to Linux Kernel +description: In this blog, Mani talks about how the Modem Host Interface (MHI) + bus support for Endpoint devices has been upstreamed to the Linux kernel. +date: 2022-05-19T07:58:54.000Z +image: linaro-website/images/blog/Tech_Background +tags: + - linux-kernel +author: manivannan-sadhasivam +related: [] + +--- + +At the start of the year 2020, I wrote [a blog on MHI bus support for Host devices](https://www.linaro.org/blog/mhi-bus-support-gets-added-to-the-linux-kernel/). Two years later I am back with an update on the MHI bus support for Endpoint devices. The timeline tells the story on its own: **Upstreaming is hard but it is always important to do so**. + +# What is Modem Host Interface (MHI)? + +[My previous article](https://www.linaro.org/blog/mhi-bus-support-gets-added-to-the-linux-kernel/) gave a brief introduction to the Modem Host Interface (MHI) bus and its implementation in the Linux kernel. Even though the article focused on the host side implementation, the concept remains the same. So I won’t go over the details again here. But here is a short summary on MHI: + +MHI is the communication protocol used by the host machines to control and +communicate with the Qualcomm modems/WLAN devices over any high speed physical bus like PCIe. MHI is represented as a [bus device](https://www.kernel.org/doc/html/latest/driver-api/driver-model/bus.html) in the Linux kernel with the client drivers getting bind to a set of bidirectional channels exposed as MHI devices. There are also MHI controller drivers that define the channels used by the endpoint devices like modems/WLAN chipsets. The MHI controller driver is the one that sits between the MHI bus stack and the transport bus like PCIe. + +![Modem Host Interface - MHI](/linaro-website/images/blog/modem-host-interface-mhi-) + +# Upstreaming the MHI bus for endpoint devices + +## Motivation + +The MHI host implementation has been used widely by various OEMs for connecting their modems to a host machine. These days, adding the support for a modem device may take only a couple of lines to the [PCI\_GENERIC ](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/bus/mhi/host/pci_generic.c#n449) MHI host controller driver. + +So while the MHI host stack continued to receive updates ever since it got merged, Linaro started to add the MHI bus support for endpoint devices in parallel. The push for the MHI endpoint support came from Qualcomm as their vision was to run the full upstream software stack on the modems. The full upstream software stack includes the Linux Kernel and the userspace components. + +Once the entire software stack is upstreamed, the OEMs can pull all the latest +versions of the software components directly from the respective repositories and provide updated firmware to their customers seamlessly. This will greatly help in building more secure modems with fewer or no software vulnerabilities. 
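+
+To give a feel for how little host-side glue is now needed for the "couple of lines" mentioned above, here is roughly what such an entry in the generic MHI PCI controller driver looks like. The structure and field names are approximations of the upstream `pci_generic.c` code rather than verbatim, and the device ID below is made up:
+
+```c
+/* Illustrative only: names approximated from drivers/bus/mhi/host/pci_generic.c */
+static const struct mhi_pci_dev_info example_modem_info = {
+	.name = "example-modem",
+	.config = &modem_qcom_v1_mhiv_config,	/* shared channel/event configuration */
+	.bar_num = 0,
+	.dma_data_width = 32,
+};
+
+static const struct pci_device_id example_pci_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x1234),	/* hypothetical device ID */
+	  .driver_data = (kernel_ulong_t)&example_modem_info },
+	{ }
+};
+```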
+ +## Preliminaries + +Compared to our MHI host support work, the endpoint work got more pieces to stick together apart from MHI. + +We took [Qualcomm Snapdragon X55 5G modem](https://www.qualcomm.com/products/technology/modems/snapdragon-x55-5g-modem) as the target endpoint device and got the below development platforms: + +1. SDX55 MTP sponsored by Qualcomm. +2. [Telit FN980m](https://www.telit.com/devices/fn980-and-fn980m-data-cards-support-5g/) EVB sponsored by Telit. +3. [T55 development kit](https://www.thundercomm.com/product/t55-development-kit-5g-sub-6ghz/) purchased from Thundercomm. + +With these development platforms in place, we started upstreaming the SoC +support for the [SDX55 chipset](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/arm/boot/dts/qcom-sdx55.dtsi) and the board support for these platforms \[[1](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/arm/boot/dts/qcom-sdx55-mtp.dts)]\[[2](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts)]\[[3](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/arm/boot/dts/qcom-sdx55-t55.dts)]. + +Below is the list of features upstreamed by Linaro apart from the MHI endpoint bus support: + +1. GCC +2. Pinctrl +3. NAND +4. BAM DMA +5. USB PHY and controller +6. Watchdog +7. Interconnect +8. CPUFreq +9. Remoteproc +10. PCIe PHY and Endpoint controller + +Once the base support was done, we started looking into the MHI endpoint stack support for SDX55. + +## MHI Endpoint (MHI EP) stack + +We decided to take inspiration from the MHI EP bus support added by Qualcomm in their [downstream Linux kernel](https://www.google.com/url?q=https://git.codelinaro.org/clo/la/kernel/msm-5.4/-/tree/LE.UM.5.3.2.r1-06300-SDX65.0/drivers/platform/msm/mhi_dev\&sa=D\&source=docs\&ust=1652956639476410\&usg=AOvVaw1sP-TfkLpDQVi2zfqYHJmT). After going through the driver, it was evident to us that the stack needed some heavy refactoring to fit +upstream. It was mostly because the downstream stack was tightly coupled to their standalone [PCIe endpoint controller](https://git.codelinaro.org/clo/la/kernel/msm-5.4/-/tree/LE.UM.5.3.2.r1-06300-SDX65.0/drivers/platform/msm/ep_pcie) and [IPA](https://git.codelinaro.org/clo/la/kernel/msm-5.4/-/tree/LE.UM.5.3.2.r1-06300-SDX65.0/drivers/platform/msm/ipa_fmwk) drivers. + +To break the dependency and make it easy to upstream, we followed the same code organization as the MHI host. Below is the final representation we came up with following the [MHI host](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/bus/mhi/host) architecture: + +![MHI Endport Stack](/linaro-website/images/blog/mhi-endpoint-stack) + +As the figure illustrates, the MHI EP stack sits in between the MHI EP controller driver and MHI EP client drivers. The MHI EP controller driver modelled as a [PCI Endpoint Function driver](https://www.kernel.org/doc/html/latest/PCI/endpoint/index.html) (PCI +EPF) registers itself as **mhi\_epN** device with the MHI EP stack and handles all the interactions with the underlying bus like PCIe. It takes care of operations such as enumeration, MSI generation, event handling, and read/write to the host memory. + +On the other hand, the MHI EP client drivers like QRTR, IPA, WWAN, etc,... 
register themselves as **IPCR, IP\_HW0, QMI** devices with the MHI EP stack and take care of transmitting the protocol-specific packets between the host and the baseband processor.
+
+## Internal Address Translation Unit (iATU)
+
+Once we had settled on the MHI EP architecture, the next hurdle for us was communicating with the host over PCI. The Qualcomm downstream MHI EP code used the DMA engines available in the IPA and eDMA peripherals. But unfortunately, IPA driver support was not ready for SDX55, and the eDMA peripheral was not enabled in the SoC itself.
+
+So we were left with only one option and that's the iATU (internal Address Translation Unit) embedded into the [DesignWare PCIe IPs](https://www.synopsys.com/designware-ip/interface-ip/pci-express.html) from Synopsys. The iATU takes care of mapping the host memory onto the endpoint local memory, through which the endpoint can perform Memory-Mapped Input/Output (MMIO) operations to it. So even though we cannot use Direct Memory Access (DMA) for reading/writing to the host memory, we can at least do plain readl/writel operations from the Linux kernel by treating it as an MMIO region.
+
+But soon we came up against the limitations imposed by the iATU, one of them being the availability of only 8 outbound/inbound windows. The iATU uses outbound windows to map the host address space in the endpoint and inbound windows to map the endpoint address space in the host (mostly the BAR region). In the MHI EP stack, we needed to map many host buffers in the endpoint memory. But since there were only 8 windows available, we decided to keep only a couple of mappings constant and dynamically map the rest. This solved the problem for us.
+
+The other issue was the alignment requirement of the host address for mapping using the outbound window. The configuration of the PCIe IP in SDX55 required the host address to be 4k aligned. Initially, we tried to use the **bounce buffer** technique in the MHI host stack to allocate the 4k aligned buffers. But that proved to be costly as using the bounce buffer takes up extra cycles to [copy the buffers](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/bus/mhi/host/main.c#n191) in the host. Then Linaro Engineer **Dmitry Baryshkov** pitched in and shared a trick that allowed us to map the host buffers without any alignment requirement.
+
+Below is an illustration of the trick. Let's assume that the endpoint needs to map the host address starting from 0x40000100 with a size of 4KB (0x1000) to its local memory at 0x10000000:
+
+1. Find the offset of the host address that, when subtracted, gives the 4k aligned address.
+
+   `offset = 0x40000100 % 0x1000; /* offset = 0x100 */`
+
+2. Allocate the endpoint memory including the offset along with the actual size.
+
+   `buffer = alloc_addr(0x100 + 0x1000); /* alloc_addr(offset + actual size) */`
+
+3. Map the host address starting from the aligned address to the total size including the offset.
+
+   `map_addr(buffer, 0x40000100 - 0x100, 0x100 + 0x1000); /* map_addr(buffer, address - offset, offset + actual size) */`
+
+4. Finally, we can copy from the actual host address by adding the offset to the buffer, using the actual size.
+
+   `memcpy_fromio(0x10000000, buffer + 0x100, 0x1000);`
+
+In the above steps, we mapped the 4k aligned address below the unaligned address and then copied only the memory that we were interested in. This proved to be a nice and elegant hack to work around the iATU limitation.
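+
+For reference, the steps above can be collected into a single helper. This is only a sketch: `alloc_addr()` and `map_addr()` stand in for the endpoint's local memory allocator and the iATU outbound-window programming, and are not real kernel APIs.
+
+```c
+#include <linux/io.h>
+#include <linux/sizes.h>
+
+#define IATU_ALIGN	SZ_4K
+
+/* Stand-in helpers, named as in the steps above */
+extern void __iomem *alloc_addr(size_t size);
+extern void map_addr(void __iomem *buf, u64 host_addr, size_t size);
+
+/* Map an arbitrarily aligned host address and return a pointer to the payload */
+static void __iomem *map_unaligned_host_addr(u64 host_addr, size_t size)
+{
+	/* 1. Offset of the host address from the previous 4k boundary */
+	size_t offset = host_addr & (IATU_ALIGN - 1);
+	/* 2. Reserve endpoint memory for the offset plus the payload */
+	void __iomem *buf = alloc_addr(offset + size);
+
+	/* 3. Map from the aligned host address, covering offset + size */
+	map_addr(buf, host_addr - offset, offset + size);
+
+	/* 4. The caller copies the payload from here, e.g. with memcpy_fromio() */
+	return buf + offset;
+}
+```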
+ +## Putting all the pieces together + +Once we were able to get the MHI EP stack up and running, we added all the pieces together. That included +an MHI EP controller driver modelled as a PCI EPF driver and the MHI EP client driver modelled as a +networking driver utilizing the **IP\_SW0** channels. With all these drivers in place, we got SDX55 enumerated +as a modem in the host and got the network devices **mhi\_swip0** to appear at both host and endpoint +representing the **IP\_SW0** channels. Through the network device, we were able to communicate between host +and endpoint over networking utilities such as **ping, ssh, iperf,** etc,... + +## Data connectivity + +Even though the MHI EP stack was fully up and running, we were not able to establish data connectivity with the modem baseband processor yet. For that, we needed IPA driver support enabled in SDX55 and integrated with the MHI EP stack. I can confirm that the work has already started and will hit the mailing list soon. + +## Upstreaming work + +After we thoroughly tested the stack on both SDX55 and Snapdragon 8 Gen 1 (SM8450) based development platforms, the [initial MHI EP patches](https://lore.kernel.org/all/6fc89860-9eea-630c-f193-272bf436ad81@linaro.org/T/) were posted to the [MHI mailing list](https://lore.kernel.org/mhi/). It has gone through multiple revisions (thanks to the reviews from both Qualcomm and Linaro developers, especially Linaro Senior Engineer Alex Elder for his in-depth review of the patches). And once the patches got enough reviews, we finally [submitted the patches](https://lore.kernel.org/lkml/20220405135754.6622-1-manivannan.sadhasivam@linaro.org/) to Char Misc maintainer, Greg KH for inclusion in the next kernel release v5.19. Later, he [pulled the patches](https://lore.kernel.org/lkml/YmfVSe1JHbXTtZLG@kroah.com/) to his char-misc-next tree. + +# What's next? + +As said above, the work is not finished yet. Our top priority is to add support for IPA and eDMA to the MHI EP stack and provide data connectivity to the host machines using the [WWAN drivers](https://www.kernel.org/doc/html/latest/networking/device_drivers/wwan/index.html). At the same time, we will continue to optimize both the MHI host and EP stacks for reducing the latency and increasing the throughput. For more information, check out some of our previous blogs and sessions on this topic: + +* [Linaro connect talk on upstreaming the Qualcomm modems](https://www.google.com/url?q=https://resources.linaro.org/en/resource/JW762ZTT7Qv3jtiY5UDF2U\&sa=D\&source=docs\&ust=1652956936204910\&usg=AOvVaw3HNFHvVzjoTFiAkL4gIrU3)[](https://www.linaro.org/blog/upstreaming-support-for-qualcomm-pcie-modems/) +* [Blog on upstream host support for Qualcomm modems](https://www.linaro.org/blog/upstreaming-support-for-qualcomm-pcie-modems/) +* [Linaro connect talk on IPA](https://resources.linaro.org/en/resource/P9mzGkAzt5cJZHe2zAGtUp) diff --git a/src/content/blogs/mhi-bus-support-gets-added-to-the-linux-kernel.mdx b/src/content/blogs/mhi-bus-support-gets-added-to-the-linux-kernel.mdx new file mode 100644 index 0000000..724c6fe --- /dev/null +++ b/src/content/blogs/mhi-bus-support-gets-added-to-the-linux-kernel.mdx @@ -0,0 +1,222 @@ +--- +title: MHI Bus Support Added To Linux Kernel +description: > + In this article, Manivannan Sadhasivam will briefly talk about the internals + of MHI and its implementation in the Linux kernel. Read about his findings + here! 
+date: 2020-03-17T08:37:59.000Z +image: linaro-website/images/blog/code +tags: + - linux-kernel +author: manivannan-sadhasivam +related: [] + +--- + +Greg Kroah Hartman once said, “Buses are hard and complex. It is hard to write a bus. But it turns out that there are one or two new buses every kernel release”. + +Recently, [a patch series](https://lkml.org/lkml/2020/1/23/249) was posted to LKML for adding MHI (Modem Host Interface) bus support to the Linux kernel. This article will briefly talk about the internals of MHI and its implementation in the Linux kernel. + +MHI is a communication protocol used by the host processors to control and communicate with modems over high speed bus interface like PCI-E or shared memory. The MHI protocol has been designed and developed by Qualcomm Innovation Center, Inc., for use in their modems like SDX20/24. The protocol aims to improve the communication between host processors and external modems. Modem chipsets have become quite complex as they perform several functions, such as downloading the firmware from host processor, controlling the wireless transceivers, receiving and processing the commands from host, handling multiple networking protocols, etc... In order to efficiently control and interact with the modem chipsets, MHI provides a comprehensive solution as a whole. + +Qualcomm has [patented](https://patents.google.com/patent/US9594718B2/en) this protocol and deployed it in a wide range of devices running Linux based OS. Even though the MHI protocol is closely tied with PCI-E, it is possible to use other physical interfaces as well. + +In 2018, Qualcomm made [a first attempt](https://lkml.org/lkml/2018/4/26/1159) at upstreaming this protocol to the Linux kernel. That effort didn't go far and it was discontinued after a couple of iterations. This work has now been revived by Linaro, addressing the concerns raised by the upstream maintainers for the initial submission by Qualcomm. + +### MHI Internals + +The MHI specification is proprietary and is not made available to the public. But, a reasonable effort has been made to document the protocol in the [recently submitted patch series](https://lkml.org/lkml/2020/1/23/250). The below content can be read in conjunction with the kernel documentation for MHI. + +### Channels + +The core part of MHI are logical channels, which are used to transfer data packets such as IP packets, modem control messages and diagnostic messages between host and modem via high speed physical interfaces such as PCI-E or shared memory. These logical channels act like unidirectional data pipes between host and modem device. In a typical usecase, there will be an MHI implementation running on both ends and they exchange information over these logical channels. Since these channels are unidirectional, 2 channels are required for bi-directional communication between host and modem device. There can be a maximum of 256 logical channels in a system. + +The channel configuration is static, which means the purpose of channel is fixed and will not be changed during runtime. The below picture illustrates the exchange of messages over IPCR channel with PCI-E as the physical interface. + +![ipcr-channel](/linaro-website/images/blog/ipcr-channel) + +```c +struct mhi_chan { + const char *name; + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + u32 chan; + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + ... + struct mhi_device* mhi_dev; + ... +} +``` + +The above structure represents an MHI channel in the kernel. 
Note the `*mhi_dev` pointer in the structure. It will point to the MHI device created for this channel when MHI is in the AMSS (Advanced Mobile Subscriber Station) or SBL (Secondary Bootloader) states. Most of the fields for this structure will come from the `struct mhi_channel_config` available in the controller driver.
+
+### Events
+
+MHI events are the interrupts coming from the client device (e.g. the modem). The client device generates events for MHI state transitions, error conditions and completion messages to the host. The MHI events are generated using the Event Ring (ER), which essentially is a data structure available in the host memory, mapped for the device. The event rings are organized as circular queues of Event Descriptors (ED). Each event descriptor defines one event that is communicated from the device to the host through an actual physical interface such as PCI-E. Each event ring has an associated interrupt (MSI in case of PCI-E). The number of interrupts may be limited in the host processor, therefore multiple event rings may share available interrupts to accommodate more events.
+
+```c
+struct mhi_event {
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_chan *mhi_chan; /* dedicated to channel */
+	u32 er_index;
+	u32 intmod;
+	u32 irq;
+	int chan; /* this event ring is dedicated to a channel (optional) */
+	u32 priority;
+	...
+};
+```
+
+The above structure represents an MHI event in the kernel. Note that there is an IRQ field for each event, which can be unique or shared. This IRQ will be used by the modem for signalling events to the host in the form of event rings. When an event gets added to the event ring, the IRQ associated with the event ring will be asserted in the host. Most of the fields for this structure will come from the `struct mhi_event_config` available in the controller driver.
+
+### MHI Power Management
+
+MHI power management (PM) is handled by the MHI stack and is about controlling the MHI states of the host and device. The following are the available MHI PM states:
+
+#### MHI\_STATE\_RESET
+
+Reset is the default MHI state after power-up. MHI can also enter the reset state later on, when either the host or device requests a reset, such as after encountering an error. During this state, the device software will initialize and set the relevant MMIO registers internally.
+
+#### MHI\_STATE\_READY
+
+Once the device comes out of reset and is ready, it will set the READY field in the MMIO register, indicating that it is ready to accept MHI operations. The MHI stack running on the host will detect this change. In response, the host will prepare data structures and initialize the MHI MMIO register space.
+
+#### MHI\_STATE\_M0
+
+MHI is running and operational on both the host and the device. Now the host can start channels by issuing the channel start command. In this state, the device can switch its EE (Execution Environment) to the SBL or AMSS state and start generating events to the host.
+
+#### MHI\_STATE\_M1
+
+MHI operation is suspended by the device. This state is entered when the device detects inactivity at the physical interface for a preset time and the DEVICE\_WAKE signal is deasserted by the host.
+
+#### MHI\_STATE\_M2
+
+MHI is in a low power state. MHI operation is suspended and the device may enter a lower power mode.
+
+#### MHI\_STATE\_M3
+
+MHI operation is stopped by the host. This state is entered when the host suspends MHI operation.
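+
+For orientation, the states above map onto an enumeration in the MHI stack. The following is a simplified sketch; the authoritative definition lives in the kernel headers and includes additional states (such as a system error state), so treat the names and ordering below as illustrative:
+
+```c
+enum mhi_pm_state_example {
+	MHI_STATE_RESET,	/* default state after power-up */
+	MHI_STATE_READY,	/* device ready, host initializes MMIO and data structures */
+	MHI_STATE_M0,		/* fully operational, channels can be started */
+	MHI_STATE_M1,		/* suspended by the device after link inactivity */
+	MHI_STATE_M2,		/* low power state */
+	MHI_STATE_M3,		/* suspended by the host */
+};
+```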
+
+### MHI Firmware Download
+
+MHI supports downloading the device firmware over the BHI (Boot Host Interface) protocol. The MHI stack assumes that there can only be 2 types of firmware downloaded to the device: AMSS and SBL. The firmware name should be provided by the controller driver as below:
+
+```c
+mhi_ctrl->fw_image = "amss.bin";
+```
+
+It should be noted that only one firmware file can be supplied at a time. If the device requires both AMSS and SBL images to be downloaded, then both firmware images need to be combined into a single firmware file, or another protocol is required to supplement the loading of additional firmware files. For the first case, additional properties shall be provided by the controller driver as below:
+
+```c
+mhi_ctrl->sbl_size = SZ_512K;
+mhi_ctrl->fbc_download = true;
+```
+
+This specifies that the MHI stack needs to do a full image download, with the size of the SBL image specified using `sbl_size`.
+
+### MHI Bus Topology
+
+The MHI bus implementation in the Linux kernel has 3 major components:
+
+1. MHI Device
+2. MHI Controller
+3. MHI Driver
+
+#### MHI Device
+
+An MHI device is the logical device which is created for MHI controllers and channels. For the channels, there can either be a single MHI device for each channel (uni-directional) or one per channel pair (bi-directional). This configuration is determined by the MHI controller drivers. The MHI devices for the controllers are created during controller registration and the devices for channels are created when MHI is in the AMSS or SBL state.
+
+```c
+struct mhi_device {
+	const struct mhi_device_id *id;
+	const char *chan_name;
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_chan *ul_chan;
+	struct mhi_chan *dl_chan;
+	struct device dev;
+	enum mhi_device_type dev_type;
+	int ul_chan_id;
+	int dl_chan_id;
+	u32 dev_wake;
+};
+```
+
+The above structure represents an MHI device in the kernel. There is a `struct device` present for each MHI device and all available devices will be enumerated in sysfs under `/sys/bus/mhi/devices/`.
+
+#### MHI Controller
+
+The MHI controller is the MHI bus master in charge of managing the interactions with the client devices, such as modems. Each MHI client device will have a controller driver which will declare the MHI channels, events and IRQs, and will manage the power management operations of the client device.
+
+```c
+struct mhi_controller {
+	struct device *cntrl_dev;
+	struct mhi_device *mhi_dev;
+	void __iomem *regs;
+	void __iomem *bhi;
+	void __iomem *bhie;
+	void __iomem *wake_db;
+
+	dma_addr_t iova_start;
+	dma_addr_t iova_stop;
+	const char *fw_image;
+	const char *edl_image;
+	size_t rddm_size;
+	size_t sbl_size;
+	...
+};
+```
+
+The above structure represents an MHI controller in the kernel. Note the `cntrl_dev` pointer in the structure, which is used to pass the underlying transport's (e.g. PCI-E) device pointer to the MHI stack. Since the MHI stack itself is not involved in the physical data transmission, it relies on the existing physical interfaces to do DMA mapping, runtime PM handling of the device, etc. Also, each controller will have an `mhi_dev` associated with it, which will be the child device of the physical bus device as per the device model.
+
+Note that for the `struct device` created for controllers, there are no drivers to bind to. For this reason, the callbacks which would normally be present in a driver structure are present in the `struct mhi_controller` itself.
+
+#### MHI Driver
+
+MHI drivers are client drivers which bind to MHI devices.
The client drivers are used to send or receive upper protocol packets, such as IP packets and modem control messages, over the MHI bus. Each client driver will declare the MHI channels it binds to. As MHI devices are registered by the MHI core, MHI drivers are loaded, matched and probed in line with other buses.
+
+For instance, below is the channel declaration of the [QRTR MHI client driver](https://lkml.org/lkml/2020/1/31/316) included in the patch submission:
+
+```c
+static const struct mhi_device_id qcom_mhi_qrtr_id_table[] = {
+	{ .chan = "IPCR" },
+	{}
+};
+MODULE_DEVICE_TABLE(mhi, qcom_mhi_qrtr_id_table);
+```
+
+So the client driver binds to the IPCR (IPC Router) channel. Note that there can either be one MHI device per channel, or one per MHI channel pair. This entirely depends on the controller driver configuration.
+
+```c
+struct mhi_driver {
+	const struct mhi_device_id *id_table;
+	int (*probe)(struct mhi_device *mhi_dev,
+		     const struct mhi_device_id *id);
+	void (*remove)(struct mhi_device *mhi_dev);
+	void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
+			   struct mhi_result *result);
+	void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
+			   struct mhi_result *result);
+	void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
+	struct device_driver driver;
+};
+```
+
+The above structure represents an MHI driver in the kernel. There is a `struct device_driver` for each MHI driver so that it can bind to the corresponding `struct device`. There are also a few callbacks required by the MHI stack, so a client driver should provide relevant functions for these. The purposes of these callbacks are explained below:
+
+| Name | Description |
+| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `probe` | MHI client driver's probe function called during `mhi_driver_register` |
+| `remove` | MHI client driver's remove function called during `mhi_driver_unregister` |
+| `ul_xfer_cb` | Callback used by the MHI stack to notify the client driver of the uplink transfer status. This callback will be executed for both transfer success and failure. |
+| `dl_xfer_cb` | Callback used by the MHI stack to notify the client driver of the downlink transfer status. This callback will be executed for both transfer success and failure. |
+| `status_cb` | Callback used by the MHI stack to notify the client driver of events such as pending data, state transitions, etc. |
+
+The registered client drivers will be available in sysfs under `/sys/bus/mhi/drivers/`.
+
+### Conclusion
+
+I hope that this article provided a good overview of the MHI protocol and its implementation in the Linux kernel. Reviews are actively ongoing for this patchset and things are looking good for merging it in one of the upcoming releases.
+
+For more information on Linaro and the work we do, do not hesitate to [get in touch](https://www.linaro.org/contact/).
diff --git a/src/content/blogs/microsoft-to-talk-iot-security-with-azure-sphere-at-the-upcoming-linaro-connect-san-diego-2019.mdx b/src/content/blogs/microsoft-to-talk-iot-security-with-azure-sphere-at-the-upcoming-linaro-connect-san-diego-2019.mdx new file mode 100644 index 0000000..8f3d714 --- /dev/null +++ b/src/content/blogs/microsoft-to-talk-iot-security-with-azure-sphere-at-the-upcoming-linaro-connect-san-diego-2019.mdx @@ -0,0 +1,30 @@ +--- +title: Microsoft to talk IoT security with Azure Sphere at the upcoming Linaro + Connect San Diego 2019 +date: 2019-09-10T08:22:35.000Z +image: linaro-website/images/blog/microsoft-sphere-2 +tags: + - linaro-connect + - linux-kernel + - security + - iot-embedded +author: andrea-gallo +related: [] + +--- + +In less than a week, we will be hosting Linaro Connect at San Diego Paradise Point, September 23-27, 2019. We are pleased to announce that the Microsoft Azure Sphere team will be joining us, presenting an Azure Sphere IoT Track on Tuesday, September 24. If you are attending Linaro Connect you can register for these sessions by going to the event schedule. For those following remotely, Linaro Connect keynotes will be streamed live on [Linaro’s YouTube channel](https://www.youtube.com/LinaroOrg/live) and resources for sessions will be made available by the end of the week on the [Linaro Connect website](https://resources.linaro.org/). Here is a snapshot of what you can expect: + +## How do we keep billions of internet connected devices secure? + +Ed Nightingale, Partner Director of Engineering for Microsoft Azure Sphere, will give a keynote on Tuesday, September 24 at 10:30 AM about the security challenges that come with connecting devices to the internet and what it takes to connect them securely. Billions of devices rely on microcontrollers—from microwaves to assembly line equipment—and they’re being connected to the internet for the first time. Connectivity brings new and exciting experiences, but most microcontrollers are not equipped for the inherent security challenges. Ed will talk you through the seven properties of highly secure devices, which Microsoft considers the standard for any device connected to the internet. For more information about Ed Nightingale or to read the complete keynote abstract, [click here](https://linaroconnectsandiego.sched.com/event/SwpH/san19-200k2-keynote-securing-the-billions-of-devices-around-us?iframe=no). + +## Fitting Linux security into 4MiB of RAM + +Following Ed Nightingale’s keynote, there will be a session at 11:00 AM led by Ryan Fairfax, Principal Software Engineering Lead for Microsoft Azure Sphere. Ryan will discuss the challenges of taking modern security techniques and adapting them to resource-constrained devices and the technical details of Azure Sphere security components. For more information about Ryan Fairfax or to read the complete abstract, [click here](https://linaroconnectsandiego.sched.com/event/SufQ/san19-210-azure-sphere-fitting-linux-security-into-4-mib-of-ram). + +## A view from industry: Securing IoT with Azure Sphere + +Ed Nightingale will wrap up the Azure Sphere IoT track at 12:00 PM with a technical session on Azure Sphere. Ed will talk through the critical market scenarios Azure Sphere addresses, dig deep into the silicon, software, and cloud architecture that make up the solution, and share some of the project’s history. 
Attend this session to explore the unique security design and capabilities that make Azure Sphere-certified chips unique and why an end-to-end IoT security solution is critical. For more information, [click here](https://linaroconnectsandiego.sched.com/event/SufT/san19-216-a-view-from-industry-securing-iot-with-azure-sphere) to read the complete abstract. + +For more information about Linaro Connect or how to register go to the [Connect page](/connect/). We hope to see you there! diff --git a/src/content/blogs/multiprocessing-and-openamp-messaging-demo.mdx b/src/content/blogs/multiprocessing-and-openamp-messaging-demo.mdx new file mode 100644 index 0000000..7e2ddd0 --- /dev/null +++ b/src/content/blogs/multiprocessing-and-openamp-messaging-demo.mdx @@ -0,0 +1,30 @@ +--- +title: Multiprocessing and OpenAMP Messaging Demo +description: > + Linaro and its members are developing a set of software components and tools + to help with a system approach to multiprocessing. Watch a demo video here. +date: 2020-02-26T11:54:46.000Z +image: linaro-website/images/blog/Billsdemo +tags: + - iot-embedded +author: bill-fletcher +related: [] + +--- + +Linaro and its members are developing a set of software components and tools to help with a system approach to multiprocessing. This short video shows a multiprocessing demo running on the STM32MP1 from STMicroelectronics. + +Featured Linaro work in this demo includes: + +* Devicetree - an on-going area of work in Linaro +* OpenAMP - a Linaro Community Project +* 96Boards specification - Avenger96 Community Board and mezzanine expansion +* Zephyr - Linaro is a contributor to the Zephyr project + +For more details of these technologies, please see our [previous blog on heterogeneous multicore systems](/blog/heterogeneous-multicore-systems-the-new-open-source-frontier/). + + + +Linaro brings together industry and the open source engineering community to collaboratively develop software on Arm. + +Linaro Developer Services can help you leverage open source on Arm to ensure fast time to market, exceptional quality and security, and cost effective long term maintenance. For more information go to https://www.linaro.org/services/. diff --git a/src/content/blogs/network-latency-with-tsn-on-virtual-machine.mdx b/src/content/blogs/network-latency-with-tsn-on-virtual-machine.mdx new file mode 100644 index 0000000..3ac5cfe --- /dev/null +++ b/src/content/blogs/network-latency-with-tsn-on-virtual-machine.mdx @@ -0,0 +1,135 @@ +--- +title: Network Latency with TSN on Virtual Machine +description: This blog talks about Time Sensitive Networking and its role in the + Software Defined Vehicle. Read more here! +date: 2022-12-01T03:24:01.000Z +image: linaro-website/images/blog/Automotive_Dashboard +tags: [] +author: takahiro-akashi +related: [] + +--- + +This blog series will describe our experiments in evaluating network latency with IEEE802.1 [TSN](https://1.ieee802.org/tsn/) (Time Sensitive Networking) technology, especially under virtual machine (hypervisor) environments. + +The first article will explain our assumptions, test system setup and different configurations of virtual network interfaces that we are going to evaluate. + +# Background + +Nowadays the Software Defined Vehicle (SDV) has been gaining momentum in the automotive industry. A modern vehicle has to deal with information from a number of sources. As well as data from the powertrain and chassis there are now increasingly Advanced Driver Assistance Systems (ADAS). 
The infotainment system has expanded from managing a radio and potentially a CD player to sourcing data from the outside world via the cloud. As such, more and more software components are integrated into building a modern vehicle.
+
+Linaro's white paper on [“Software Defined Vehicles and the Need for Standardization”](https://static.linaro.org/assets/automotive_white_paper_0921.pdf) surveys this trend, summarises the architectural transformation along with the standardized technologies being deployed in this segment, and illustrates the challenges that we are facing now and in the near future.
+
+In traditional systems, fixed-function Electronic Control Units (ECUs) are connected directly to the sensors, relays and actuators, creating purpose-specific domains. Even today, as mentioned in Linaro's white paper, the number of ECUs in a premium car has increased significantly to 150 ECUs.
+
+As this number of ECUs grows into the hundreds, there is a push to integrate multiple services onto a smaller number of ECU servers. The logical next step is to virtualise their functions onto more powerful centralised ECUs by taking advantage of hypervisor isolation.
+
+The wiring harness has also been a big problem in terms of space and weight. According to [this article](https://semiengineering.com/shedding-pounds-in-automotive-electronics/), an average vehicle has 100-120 lbs of wire harness, and a luxury car contains 1,500-2,000 copper wires, or over 1 mile in total length.
+
+In the zonal architecture, electrical devices such as sensors and actuators will be connected to ECUs on the servers via zonal gateways. The networks between them will then be converged, moving from traditional CAN bus technology to standard Ethernet, which brings in a single unified bus for system-wide networking, simplifying network management and reducing the total cost of physical wiring.
+
+![Image of zonal architecture](/linaro-website/images/blog/zonal-architecture-image)
+
+# Time Sensitive Networking and Performance
+
+As many automotive applications require some level of real-time behaviour, the default best-effort approach of the Ethernet protocol isn't suitable for reliable data exchange over the network. Here we have a solution: Time Sensitive Networking (TSN).
+
+TSN is a collection of standards defined by IEEE 802.1 and seen as extensions to IEEE 802.1Q (VLAN), addressing issues like network latency and robustness. TSN provides the predictable nature of real-time communications, where particular packets are expected to be delivered within a specific time with minimal jitter, while other low-priority traffic is still allowed on a best-effort basis.
+
+TSN includes several features such as:
+
+* clock synchronization (Precision Time Protocol, or in short PTP):
+  all the clocks of the devices connected to the network, including NICs and routers, will be synchronized to behave based on a shared time reference.
+* various network schedulers/shapers:
+  the credit-based scheduler (cbs or Qav) provides fair scheduling, allowing managed bandwidth for each traffic class, while the time-aware shaper (tas or Qbv) enforces deterministic delivery under strict packet gate controls. There are more schedulers and shapers defined in the standards (see the example configuration below).
+* resource management protocol (SRP)
+* multiple paths and frame replication
+
+With those technologies combined and properly configured, some level of determinism in packet delivery is guaranteed. This determinism is, however, limited to the hardware or MAC layer.
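+
+As a concrete illustration, on Linux a time-aware shaper (Qbv) schedule can be installed with the `taprio` qdisc. This is only a sketch: the interface name, traffic-class mapping and gate timings are illustrative values, not a recommended configuration.
+
+```
+tc qdisc replace dev eth0 parent root handle 100 taprio \
+    num_tc 3 \
+    map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
+    queues 1@0 1@1 2@2 \
+    base-time 1000000000 \
+    sched-entry S 01 300000 \
+    sched-entry S 02 300000 \
+    sched-entry S 04 400000 \
+    clockid CLOCK_TAI
+```
+
+Such a schedule only governs when each traffic class may transmit at the NIC; it does not remove any of the software overhead higher up the stack.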
As more software components, along with kernel or hypervisor interventions, get stacked both on sender side and receiver side, software plays dominant roles in increasing end-to-end latency in specific use cases. + +Linaro has been getting involved in TSN related development works and contributed to bringing them into mainstream for broader user experiences. At Linaro Connect Bangkok 2019, we also presented our experiments ([XDP for TSN](https://resources.linaro.org/en/resource/F6xXMMdkS6BjHdqri8Yy76) and [XDP Offload for OPC UA](https://resources.linaro.org/en/resource/TG82UyhpmJafAmEfiaWXP4)) and demonstrated a fast data path utilizing the AF\_XDP technology for minimizing the software overhead in the kernel network stack. + +We are now going to [move forward](https://linaro.atlassian.net/browse/STR-68); the ultimate goal is to optimise some software overhead and strive to guarantee the worst-case latency under a virtual machine environment. As a first step, we will measure and evaluate network performance, especially latency, for different implementations of virtual network interfaces and then look into some of the potential factors that might impose non-negligible overheads in packet delivery. + +# Latency Evaluation + +## Simple Scenario + +In a real-world car, there are plenty of different types of data transmitted between devices/gateways and ECU servers over the network. For instance, side or rear-view cameras should deliver view images in constant intervals to the center console (cluster). In powertrain and chassis control systems, more variety of data from sensors must be guaranteed to be transmitted over the ethernet with strict criteria and ECU application is also expected to be able to manage devices (like actuators) by sending control messages in a reliable manner. This type of traffic must be much more deterministic (with strict behaviors) than data streaming for infotainment and absolutely critical for the safety system. + +While we will have to learn more about system criteria, in particular, network traffic characters and related requirements, we cannot at this stage simulate the whole system with realistic applications, instead we would like to consider a simple use case with two different types of network traffic in this study. + +* latency-critical messages + Those may mimic periodic data transmission from device as well as some control messages from ECU +* bulk data streams on a best-effort basis + +Assuming the network configuration, including TSN, is properly set up and all the traffic is arbitrated at some manageable level, we can expect that this model will tell us enough fundamentals to understand basic behaviors of a system (OS and hypervisor) under mixed and congested network traffic load. + +![Image of evaluation model](/linaro-website/images/blog/evalutation-model-) + +## Virtual Network Interface + +We use kvm as a hypervisor in this study since there are several choices available for virtual network configurations. One of the advantages of using kvm is that, as it is integrated in the Linux kernel, using built-in instrument tools makes it easier to examine and analyze behaviors around networking in the system later on. + +a) bridge in host + +b) macvtap + +c) offload to userspace (vhost-user) + +In addition, we examine the PCI pass through for comparison purposes. 
+ +d) PCI pass through (of physical device or SR-IOV virtual function) + +We always use virtio-net as a virtual network device on guest VM side since it is a common hypervisor-agnostic interface and can perform much better than a user-mode device fully-emulated by qemu. + +### bridge within host + +A tap device is an in-kernel network interface which corresponds to a virtual network exposed to a guest VM. Adding a tap device and a physical device on the host OS to the same bridge allows a guest VM to communicate with devices and gateways participating in the external network. + +We assume that vhost-net is on so that we can always save additional cost in virtqueue handling due to context switches and data copy. + +We think of following bridge implementations: + +a-1) kernel bridge: + +This is a default configuration when a network device is set up with a tap interface. + +![Image of kernel bridge](/linaro-website/images/blog/kernel-bridge) + +a-2) Open vSwitch: +The kernel bridge in (a-1) case will be replaced with in-kernel openvswitch module. Since packets are handled per-interrupt base and vhost-net and virtio components still remain in the data path, unlike "userspace offload" case below, we see little improvement thanks to this simple replacement. + +![Image of open vswitch](/linaro-website/images/blog/open-vswitch-image) + +a-3) XDP-based bridge: +Two XDP programs are to be installed for ingress and egress paths. When a new packet arrives at a port, its destination is looked up in an internal table by XDP program. If it is destined to guest VM (in ingress case), it will be redirected to a corresponding tap interface as a fast data path. Other packets are passed on to the normal kernel network stack (in this case, kernel bridge) as a slow data path. + +![Image of XDP-based bridge ](/linaro-website/images/blog/xdp-based-bridge) + +### macvtap + +Macvtap is a combination of macvlan and tap interface. Each macvtap interface has its own mac address on an associated physical network interface and all the packets destined to this address are directly redirected to a guest VM at the low layer of kernel network stack. + +![Image of macvtap](/linaro-website/images/blog/macvtap-image) + +### Userspace offload + +This is an advanced configuration to skip most of the kernel software stack on the host OS. A userspace application may fetch all the packets directly from a network device driver and re-routes them to guest VMs via vhost-user in userspace. + +Open vSwitch is one such technology and can utilize either DPDK (Data Path Development Kit) or AF\_XDP socket to bypass kernel overhead. AF\_XDP support seems to be experimental as of now, though. + +![Image of userspace offloading](/linaro-website/images/blog/userspace-offloading) + +### PCI pass through + +Some NICs have virtual functions of network interfaces (SR-IOV), which are directly exposed to guest VMs with assistance from VFIO framework and work as isolated network devices on VMs. As we skip most of the overhead imposed by the host or hypervisor, we can expect the optimal performance in a virtual machine environment. +This solution, however, comes with limitations. First, available virtual functions are subject to the hardware. Second, managing packets going out of different virtual machines is complicated and the traffic may not be well arbitrated to satisfy latency requirements. 
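+
+For completeness, here is a rough sketch of how a virtual function can be created and handed to a guest. The interface name and PCI address are illustrative, and the host-side steps to bind the function to vfio-pci are omitted:
+
+```
+# create two virtual functions on the physical NIC
+echo 2 > /sys/class/net/eth0/device/sriov_numvfs
+
+# pass one of them through to the guest via VFIO
+qemu-system-aarch64 ... -device vfio-pci,host=0000:01:10.1
+```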
+ +![Image of PCI pass-through](/linaro-website/images/blog/pci-pass-through) + +## Trailer + +In this article, we explained our planned methodology for latency evaluation. If you have any comments or suggestions on our approach or about assumption on system configuration, please let us know by emailing contact@linaro.org promptly so that we will perform more meaningful analysis and develop a more practical approach. + +Our next blog in this series will show the initial result of latency measurement with various virtual network interfaces. diff --git a/src/content/blogs/network-throughput-performance-improves-as-a-result-of-reworking-the-load-balance.mdx b/src/content/blogs/network-throughput-performance-improves-as-a-result-of-reworking-the-load-balance.mdx new file mode 100644 index 0000000..fdffa00 --- /dev/null +++ b/src/content/blogs/network-throughput-performance-improves-as-a-result-of-reworking-the-load-balance.mdx @@ -0,0 +1,77 @@ +--- +title: Network throughput performance improves as a result of reworking the load + balance. +description: "The scheduler has seen a lot of change over the past couple of + years with the introduction of Per Entity Load Tracking (PELT). This blog + talks about how it has been improved over time, including when wrong task + placement occurred and how this was fixed. " +date: 2020-04-06T03:10:19.000Z +image: linaro-website/images/blog/37319206961_0b863ab87d_k +tags: + - linux-kernel +author: vincent-guittot +related: [] + +--- + +The scheduler has seen a lot of change over the past couple of years with the introduction of Per Entity Load Tracking (PELT); PELT provides more fine grained statistics per task and group of tasks like the average CPU cycles used per a task and its impact on the load of the system. I started working on the scheduler a while back: improving PELT first and then studying wrong task placement and how to fix it. + +Fixing task placement was becoming increasingly difficult as the last remaining problems were more and more specific. The fixes involved more hacking, biasing the load balance algorithm with meaningless value rather than ensuring correct behavior. A typical example of this was the use of an average load per task which didn’t have any real meaning but was used to try to move at least one task instead of explicitly setting that we wanted to move a task. + +It became obvious in the community that a full rework of the load balancer was the best solution to move forward on the load balance. + +From this observation, the idea of a full rework of the load balance began to emerge. The aim of reworking the load balance has been to: + +* Clean up the code and remove old heuristics which are meaningless. +* Simplify the policy of task placement. +* Define more precisely the state of a group of CPUs, i.e.the group has spare capacity, is fully busy or is imbalanced because of pinned tasks, the running task doesn’t fit on local group, or the CPU with higher capacity is available. +* Describe exactly what the scheduler has to do to fix the imbalance: migrate some load, tasks or utilization of the CPU, or misfit task. +* Associate simple action to each state. + +These changes have been possible thanks to PELT and its various enhancements that provide a good understanding of the state of a group of CPUs. + +As explained above, the primary goal was to clean up and rework the load balance to ease the maintenance without introducing performance regressions, especially for systems like servers which are monitoring closely their throughput. 
The first test results, which have been run on an Arm system (embedded 8 cores and server 224cores/2 nodes), have not shown performance regression but the opposite. Results have shown improvements for low and medium load use cases where it’s more efficient to use the number of running tasks and the utilization of CPU to place tasks efficiently. + +* small arm64 dual quad cores system + +``` + tip/sched/core w/ this patchset improvement + +hackbench -l (2560/#grp) -g #grp + 1 groups 1.579 +/-29.16% 1.410 +/-13.46% (+10.70%) + 4 groups 1.269 +/-9.69% 1.205 +/-3.27% (+5.00%) + 8 groups 1.117 +/-1.51% 1.123 +/-1.27% (+4.57%) +16 groups 1.176 +/-1.76% 1.164 +/-2.42% (+1.07%) + +``` + +* large arm64 2 nodes / 224 cores system + +``` + tip/sched/core w/ this patchset improvement + +hackbench -l (256000/#grp) -g #grp + 1 groups 15.305 +/-1.50% 14.001 +/-1.99% (+8.52%) + 4 groups 5.959 +/-0.70% 5.542 +/-3.76% (+6.99%) + 16 groups 3.120 +/-1.72% 3.253 +/-0.61% (-4.92%) + 32 groups 2.911 +/-0.88% 2.837 +/-1.16% (+2.54%) + 64 groups 2.805 +/-1.90% 2.716 +/-1.18% (+3.17%) +128 groups 3.166 +/-7.71% 3.891 +/-6.77% (+5.82%) +256 groups 3.655 +/-10.09% 3.185 +/-6.65% (+12.87%) + +dbench + 1 groups 328.176 +/-0.29% 330.217 +/-0.32% (+0.62%) + 4 groups 930.739 +/-0.50% 957.173 +/-0.66% (+2.84%) + 16 groups 1928.292 +/-0.36% 1978.234 +/-0.88% (+0.92%) + 32 groups 2369.348 +/-1.72% 2454.020 +/-0.90% (+3.57%) + 64 groups 2583.880 +/-3.39% 2618.860 +/-0.84% (+1.35%) +128 groups 2256.406 +/-10.67% 2392.498 +/-2.13% (+6.03%) +256 groups 1257.546 +/-3.81% 1674.684 +/-4.97% (+33.17%) + +``` + +Other people from the community started to raise interest in the rework and wanted to also fix old problems like the suboptimal use of cores on NUMA systems : [The Linux Scheduler: a Decade of Wasted Cores](https://people.ece.ubc.ca/sasha/papers/eurosys16-final29.pdf). Some regressions have been raised during the review but thanks to the cleanup work, it has often been straightforward to fix them because the culprit piece of code was self contained, we therefore didn’t have to worry about the side effects to unrelated configurations. + +Although we tried to cover a wide range of behavior during the development and review phases, it was impossible to cover all use cases. Nevertheless, the patchset was considered to be mature enough to be queued for v5.5 with the constraint that we will have to be reactive to fix every regression reported, and that the patchset would be reverted if we were not able to fix it. + +The main perf regression raised after merging the patchset and before the release of v5.5, has been the one related to kexec and fork perf regression. No other regression has been raised and the rework is now part of the v5.5. This is probably just the beginning because the rework will be used more and more with distro moving to a more recent kernel and we can expect more test results. In fact, this has already happened with the Vmware perf team reporting around +20-25% of network throughput for one of their performance tests[ https://lkml.org/lkml/2020/2/25/38](https://lkml.org/lkml/2020/2/25/38) . It’s a bit unusual to receive performance improvements feedback on a mailing list as we are more used to getting regression notifications but such good feedback is always encouraging and confirms that the rework was a good thing to do. 
diff --git a/src/content/blogs/new-trust-sources-for-linux-kernel-keyrings.mdx b/src/content/blogs/new-trust-sources-for-linux-kernel-keyrings.mdx new file mode 100644 index 0000000..d9e7025 --- /dev/null +++ b/src/content/blogs/new-trust-sources-for-linux-kernel-keyrings.mdx @@ -0,0 +1,65 @@ +--- +title: New Trust Sources for Linux Kernel Keyrings +description: In this blog, our Engineer talks about how Linaro helped generalize + the Trusted Keys sub-system in Linux to add support for new trust sources. + Read more here. +date: 2021-11-02T01:53:36.000Z +image: linaro-website/images/blog/Security_screen +tags: + - linux-kernel + - security +author: sumit-garg +related: [] + +--- + +![Linux Keyrings Image](/linaro-website/images/blog/linux-keyrings) + +## Introduction + +Protecting key confidentiality is essential for many kernel security use-cases such as disk encryption, file encryption and protecting the integrity of file metadata. Trusted keys are symmetric keys created within the kernel. The kernel also provides a mechanism to export trusted keys to user-space for storage as an opaque blob and for the user-space to later reload them onto Linux keyring without the user-space knowing how to decrypt the opaque blob. Trusted keys make it impossible for userspace compromises to leak key material. In order to embed trust in Trusted Keys however, there is a requirement for the availability of a Trust Source. + +In this blog, we will look at how we generalized the Trusted Keys sub-system in Linux. This has made it easier for kernel security developers to add support for new trust sources and reduce downstream kernel technical debt. + +## What is a trust source? + +A trust source provides the source of security for Trusted Keys. New trusted keys are created from random numbers generated in the trust source. Trusted keys can be encrypted/decrypted using a unique secret key known only to the trust source, which never leaves the trust source’s boundary. Unique secret key usage via crypto operations is protected by a strong access control policy within the trust source. + +## Background: Trusted Platform Module as a trust source + +Trusted Keys were introduced in the Linux kernel from v2.6.38. Since this feature’s inception, the only trust source has been provided by the Trusted Platform Module (TPM). Thus, if a user needs to leverage Trusted Keys support on specific hardware, there is a need to deploy a TPM device. This requirement made the Trusted Keys feature unavailable on many embedded systems as most of them do not possess a TPM device and adding one to the Bill of Materials is viewed by vendors as expensive. + +![Trusted Platform Module Image](/linaro-website/images/blog/trusted-platform-module) + +## Using TrustZone as a trust source + +Many embedded systems do come with alternative hardware mechanisms such as Arm TrustZone, crypto engines etc. that are capable of providing a source of trust. These mechanisms can be leveraged to support Trusted Keys encryption and decryption operation. This has led to the implementation of software-based TPM but that too has its shortcomings. A full-featured software TPM is a large and complicated software stack. This makes it difficult to port and, on constrained devices with limited flash space, it may be difficult to fit along with the boot firmware. + +A Trusted Execution Environment (TEE) based on Arm TrustZone provides hardware based isolation to perform trusted operations. 
For example, the open source TEE implementation, [Open Portable TEE (OP-TEE)](https://optee.readthedocs.io/en/latest/), is supported on approx. 80 platforms from various SoC vendors. OP-TEE offers a standardized TEE client API (compliant with GlobalPlatform TEE Client API [Specification v1.0](https://globalplatform.org/specs-library/tee-client-api-specification/)) to perform cryptographic operations using a Hardware Unique Key (HUK) that is only accessible within the TEE. The HUK can be utilized to perform encrypt/decrypt operations for Trusted keys. The encrypted trusted key blob can be exported to user-space which can later be decrypted and loaded in kernel keyring. + + + +## Adding a trust source framework + +Back in June 2019, we shared the initial [RFC patch-set](https://lore.kernel.org/lkml/1560421833-27414-1-git-send-email-sumit.garg@linaro.org/) to add support for a standalone TEE based trusted keys module. This initial version operated by replacing the existing trusted keys module which was tightly coupled to use TPM as a trust source. This RFC received encouraging feedback from Jarkko Sakkinen (the trusted keys co-maintainer) but we were asked to be more aggressive in refactoring the existing TPM code to avoid code duplication between the TPM and TEE code bases and abstract out common APIs. + +Based on this feedback our first step was to refactor the existing TPM1 and TPM2 code into a trusted keys sub-system. At this stage the new sub-system didn't add any new features but it consolidated trusted keys code. These changes landed in the mainline kernel and were released in v5.5. + +Our next step was to add an additional trust source, allowing the TEE to provide these services instead of relying upon a TPM. These patches started to attract attention from a wider range of reviewers. Several reviewers were concerned that having both TPM and TEE as trust sources might inadvertently suggest the implementations have identical security properties. TEE trust sources, whether implemented as software-TPM or direct TEE implementations, certainly do have different benefits and drawbacks compared to hardware TPM implementations. Consensus was eventually reached by observing the choice between competing security approaches is not really a job for the kernel. Instead it is a decision to be made when choosing what hardware to deploy the kernel on. Thus the tension was largely resolved through careful documentation, in particular in the implementation guidelines corresponding to the different trust sources. + +Two other review comments in particular helped us improve the solution. The first was a request for a kernel module parameter to allow a user to force a particular choice for a trust source in cases where both TPM and TEE are present. The second suggestion was to optimize the trust source callbacks by using [static calls](https://lwn.net/Articles/815908/) instead of indirect pointer dereferences. + +As a result, as part of the v5.13 kernel release cycle, the trust source framework and a new trust source as TEE made its way to the mainline kernel. Many thanks to all who were involved in the review and testing process. + +![Trusted Keys Core Image](/linaro-website/images/blog/trusted-keys-core) + +## Using crypto hardware as a trust source + +Since the Trusted Keys sub-system was introduced to the kernel in 5.13, it has gained some attention from the kernel community. 
Ahmad Fatoum from Pengutronix has proposed a new trust source based on NXP’s [Cryptographic Acceleration and Assurance Module (CAAM)](https://lore.kernel.org/linux-integrity/cover.9fc9298fd9d63553491871d043a18affc2dbc8a8.1626885907.git-series.a.fatoum@pengutronix.de/). The CAAM is included in recent NXP i.MX and QorIQ SoCs. It can directly encrypt/decrypt user data with the Advanced Encryption Standard (AES) using a unique, never-disclosed, device-specific key. We were rather flattered by [Ahmad’s summary](https://lore.kernel.org/linux-integrity/1530428a-ad2c-a169-86a7-24bfafb9b9bd@pengutronix.de/) of how adding support for pluggable trust sources improves the upstream kernel: + +*“The users I meant are humans, e.g. system integrators. They need to think about +burning fuses, signing bootloaders, verifying kernel and root file systems, encrypting file systems and safekeeping their crypto keys. Ample opportunity for stuff to go wrong. They would benefit from having relevant kernel functionality integrated with each other instead of having to carry downstream patches, which we and many others did for years. We now finally have a chance to drop this technical debt thanks to Sumit's trusted key rework and improve user security along the way.”* + +We have also seen patches from Richard Weinberger who has proposed a trust source using a simpler NXP device called the [Data Co-Processor (DCP)](https://lore.kernel.org/linux-integrity/20210614201620.30451-1-richard@nod.at/). This peripheral is found on older NXP SoCs such as the i.MX6ULL. Its big brother, CAAM, can directly encrypt and decrypt blobs in hardware but the DCP cannot do this. Instead the DCP is capable of performing AES operations using hardware-bound keys. These keys are not accessible to the operating system, although the encryption/decryption operation needs aid from software. + +Overall we have been very pleased with the progress so far. Having new ways to exploit security features on Arm platforms with TrustZone support is exciting. Likewise we have been delighted to see this work open the door to adding further support for alternative crypto hardware, especially given the minimal changes the patches to date have required of the generic sub-system code. diff --git a/src/content/blogs/next-qemu-development-cycle.mdx b/src/content/blogs/next-qemu-development-cycle.mdx new file mode 100644 index 0000000..4891479 --- /dev/null +++ b/src/content/blogs/next-qemu-development-cycle.mdx @@ -0,0 +1,105 @@ +--- +title: Linaro’s Future Development Plans for QEMU +description: In this article, Alex Bennée gives a summary of Linaro's + engineering plans for QEMU. Read more here! +date: 2022-10-26T08:00:00.000Z +image: linaro-website/images/blog/code-background_1 +tags: + - qemu + - open-source + - arm + - linux-kernel +author: alex-bennee +related: [] + +--- + +# Introduction + +QEMU is an open source machine emulator and virtualiser that Linaro has been involved with since its creation. It provides a way of running Arm code on developer machines without access to actual Arm hardware. This makes it very useful for writing software before silicon is available. We have worked on improving the Arm emulation since the introduction of the original 64-bit v8 architecture. We have also been involved in improving support for Arm platforms when running Virtual Machines (VMs) using technologies like the Kernel Virtual Machine (KVM). + +Last week during the Linaro members’ operational meeting I laid out our development plans for QEMU over the next 6-12 months.
Before I launch into the details I want to give an overview about how we prioritise our work given the mission statement of the project to [enable the Arm architecture in QEMU](https://linaro.atlassian.net/wiki/spaces/QEMU/overview). + +## Ensure a well maintained upstream + +It should be no secret that Linaro heavily invests in its maintainers. We believe it is important to our success to have engineers who are familiar with the code bases they work on. While [kernel work](https://www.linaro.org/blog/linaro-in-top-five-for-most-active-contributors-to-the-6-0-linux-kernel-release/) often grabs the headlines, our involvement in QEMU is also deep and sustained. + +\| Red Hat | 2066 | (28.0%) | +\| Linaro | 1601 | (21.7%) | +\| (None) | 824 | (11.2%) | +\| IBM | 593 | (8.0%) | +\| Instituto de Pesquisas Eldorado | 265 | (3.6%) | + +However we are not simply proxies for our members - most of our members with an interest in QEMU also contribute directly. To show this I added up their contributions (not including RedHat) for the last year and regenerated the stats. + +\| Red Hat | 105250 | (27.8%) | +\| Linaro | 64337 | (17.0%) | +\| (None) | 38457 | (10.2%) | +\| Linaro Members (combined) | 30541 | (8.0%) | +\| IBM | 28616 | (7.6%) | + +As you can see everyone benefits from having a well maintained upstream that you can reliably develop your features on. + +## Upstream useful architectural features + +The main reason users want to use Arm on QEMU is so they can develop code for new architectural features before hardware becomes available. We have a long history of enabling features from our early [TrustZone](https://www.linaro.org/blog/arm-trustzone-qemu/) work to innovations like [Scalable Vector Extensions](https://www.linaro.org/blog/sve-in-qemu-linux-user/). While hardware is only just coming onto the market that supports SVE, most of the software enablement was done with the help of QEMU. + +In the last year we have been busy filling in a number of the smaller features required for higher baseline CPUs. This included support for various addressing modes for large virtual and physical address spaces (FEAT\_LPA, FEAT\_LPA2, FEAT\_LVA). Perhaps the biggest set of new instructions was for Arm's [Scalable Matrix Extensions](https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/scalable-matrix-extension-armv9-a-architecture) +(SME) which provide for efficient matrix operations which are important for modern Machine Learning (ML) and Artificial Intelligence (AI) applications. + +All of these are available when you use `-cpu max` in your QEMU invocation. You can see the ever growing list of Arm features we support in the [QEMU manual](https://qemu.readthedocs.io/en/latest/system/arm/emulation.html). + +## Support QEMU as a software reference platform + +I've mentioned before in [previous blogs](https://www.linaro.org/blog/many-uses-of-qemu/) how many projects use QEMU as a reference platform. I find new projects every year that target a QEMU as an easy to access platform for those who wish to experiment with something without the outlay of finding the right reference board. + +This year we achieved our first certification for the +[sbsa-ref](https://qemu.readthedocs.io/en/latest/system/arm/sbsa.html) +machine which provides a well defined testing base for firmware +development. The [SystemReady VE v0.5 (ES) (Level 1)](https://www.arm.com/architecture/system-architectures/systemready-certification-program/ve) +certificate can be downloaded from Arm's website. 
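+
+For readers who want to try these emulated features themselves, the `-cpu max` option mentioned earlier is all that is needed; a minimal sketch of such an invocation is shown below (the kernel and initrd paths are placeholders rather than files shipped with QEMU):
+
+```
+qemu-system-aarch64 -M virt -cpu max -smp 4 -m 2048 -nographic \
+    -kernel Image -initrd rootfs.cpio.gz \
+    -append "console=ttyAMA0"
+```
+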
As we continue to add newer "concrete" CPUs alongside `-cpu max` we will be able to aim for higher levels of certification. + +## Improving the developer experience + +Finally, as QEMU is a tool used by developers, improving their +experience is our final primary goal. We do this by helping maintain +essential tools like the +[gdbstub](https://qemu.readthedocs.io/en/latest/system/gdb.html) for +debugging and semihosting which aids early stage bare metal +development. We are also developing [TCG +plugins](https://qemu.readthedocs.io/en/latest/devel/tcg-plugins.html) +which allow for dynamic analysis of running code as well as continuing to improve QEMU's above-the-OS user mode emulation. Someone also reminded me that QEMU's open source nature is a plus in itself because the humble `printf` can be deployed to dump deep state information about a system when a program is behaving strangely. + +# The next cycle + +With our work over the last year outlined, let's talk about what we are working on in the coming months. + +## v9.0 Baseline CPU + +The next generation of the Arm architecture was [announced last +March](https://www.arm.com/company/news/2021/03/arms-answer-to-the-future-of-ai-armv9-architecture) +and continues to add new features enhancing security and performance for a number of workloads. To reach the point of emulating an architecturally correct v9.0 baseline CPU we need to fill in some holes of previously optional features. You can track the work towards that goal on our JIRA by following the [main EPIC for v9.0](https://linaro.atlassian.net/browse/QEMU-471). + +## Confidential Computing and Realms + +One of the biggest parts of the v9.0 announcement was the introduction of Arm's [Confidential Compute Architecture](https://www.arm.com/architecture/security-features/arm-confidential-compute-architecture). This is a big shift in the security architecture of computing which allows secure workloads to be run on cloud systems while guaranteeing the cloud provider cannot look inside the confidential workload. As you can imagine this involves a lot of individual components from the base firmware to the hypervisor and kernel as well as the rest of the cloud software stack. We want to enable the underlying Realm Management Engine (FEAT\_RME) to support the development of software on this new and innovative stack. + +There are a number of challenges for us to solve on the way to this +including the Large System Extensions (FEAT\_LSE2) which will require +careful modification to QEMU's core translation code to properly model +the new atomicity and alignment requirements of these systems. We've +already [started posting +patches](https://patchew.org/QEMU/20221021071549.2398137-1-richard.henderson@linaro.org/) +towards that goal but we expect it to be a significant chunk of work. You can follow the work [here](https://linaro.atlassian.net/browse/QEMU-300). + +We expect it will take us most of the next year to implement and test all the bits and pieces for a full working system but that time can be shortened by collaborating with us on the mailing lists. You can track the main work for [FEAT\_RME here](https://linaro.atlassian.net/browse/QEMU-466). + +## Single Emulation Binary + +While our principal focus will be on implementing v9.0 and Realms we are also going to start looking at a long term goal of a single emulation binary. 
This aims to make QEMU modular enough that instead of building a binary for every target architecture we can build a single one capable of emulating any hardware QEMU supports. While this will be useful for the upstream project by reducing build times and reducing inadvertent technical debt we have longer term ambitions. We hope this [work](https://linaro.atlassian.net/browse/QEMU-487) will allow us to explore more complex modelling opportunities in future. + +# Conclusion + +I hope this blog has given you an idea of the sort of things we work on and our vision for the future of emulating Arm systems in QEMU. We look forward to collaborating with our members and the wider community to realise this vision on the mailing lists over the next year. + +For more information on the work we do on QEMU and how to get involved, go to our [Enable Arm Architecture in QEMU project page](https://linaro.atlassian.net/wiki/spaces/QEMU/overview). diff --git a/src/content/blogs/op-tee-and-the-need-for-ff-a.mdx b/src/content/blogs/op-tee-and-the-need-for-ff-a.mdx new file mode 100644 index 0000000..89d187d --- /dev/null +++ b/src/content/blogs/op-tee-and-the-need-for-ff-a.mdx @@ -0,0 +1,127 @@ +--- +title: "OP-TEE and the need for FF-A " +description: In this blog, we take a look at how OP-TEE and FF-A have evolved + and why you need FF-A to enhance security. +date: 2022-03-31T11:53:22.000Z +image: linaro-website/images/blog/Linaro-and-Riscure-release-banner +tags: + - security + - open-source + - arm +author: jens-wiklander +related: [] + +--- + +# Introduction + +For everyone not up to date with FF-A and OP-TEE here's a quick recap. + +OP-TEE was released in 2014 as an open source Trusted Execution Environment (TEE) which implements the Arm TrustZone technology. Arm Firmware Framework for Arm A-profile (FF-A) is a new way of communicating between the normal world and the secure world. OP-TEE has support for this in configurations where this is enabled. In other configurations without FF-A, OP-TEE still supports the old communication protocol. + +In this blog we will discuss the need for FF-A and what we have planned for the future. + +# **Why do we need FF-A?** + +With Arm v8.4, a secure counterpart to EL2 (Exception Level 2, hypervisor mode) is added, called S-EL2, as a way of isolating the trusted OS at S-EL1. Until Arm v8.4, S-EL1 had access to the entire system. This is usually much more than needed, since the Trusted OS doesn't need to poke into the internals of Trusted Firmware at EL3 or into random places in the non-secure world. Adding S-EL2 is a way of eliminating the Trusted OS from the TCB (Trusted Computing Base). For more information on exception levels, take a look at the Arm document [Learn the architecture: TrustZone for AArch64](https://developer.arm.com/documentation/102418/0101/?lang=en), specifically the section “Secure virtualization” under the “TrustZone in the processor” heading. The Arm document [Learn the architecture: AArch64 Exception model](https://developer.arm.com/documentation/102412/0100) also gives an introduction to exception levels. + +When switching from EL1 (typically Linux kernel) to S-EL1 (Trusted OS) the CPU needs to transition through the levels as follows: EL1 -> EL2 -> EL3 -> S-EL2 -> S-EL1. Each exception level has a separate binary so when transitioning between two exception levels, both binaries must agree on the ABI (Application Binary Interface), which results in quite a few ABIs to keep track of, especially considering multiple trusted OSs. 
That’s one of the objectives with FF-A, to have the same ABI even if a different trusted OS or hypervisor is chosen. Unchanged ABIs at EL3 means that a certified Trusted Firmware can be reused unchanged in other configurations, the same applies for the hypervisor and SPMC. + +FF-A can also be implemented for Arm v7, but the gains are fewer. When switching from EL1 (non-secure SVC mode) to S-EL1 (secure SVC mode) the CPU needs to transition through these levels: EL1 -> EL2 (non-secure HYP mode) -> MON mode -> S-EL1. The entire secure side is normally controlled by the trusted OS, but it should be possible to have a trusted OS agnostic hypervisor at EL2. Since there are fewer exception levels and separate firmwares involved on Arm v7 we don’t improve as much. Some effort can be saved with the hypervisor in case it can be reused from an earlier port to Arm v8 already using FF-A. This might even be the biggest reason to use FF-A on Arm v7, to be able to use software configured in a similar way as on Arm v8. + +# Current status + +We have upstream support in the Linux kernel for OP-TEE and FF-A version 1.0. FF-A abstracts the rest of the configuration from the kernel so with this in place we should be done in this domain. + +We have upstream support in OP-TEE OS for FF-A with SPMC (Secure Partition Manager Core) at S-EL2, in a secure hypervisor, or at S-EL1 as part of OP-TEE. A small amount of platform specific code or configuration is needed so we are limited to plat-vexpress and plat-totalcompute. + +![Example of configuration with SPMC at S-EL1](/linaro-website/images/blog/example-configuration-with-spmc-at-s-el1) + +We have upstream support in TF-A for FF-A with SPMC at S-EL2 and S-EL1. Again a small amount of platform specific code or configuration is needed. Plat/arm and plat/qemu/qemu have the most support. Plat/qemu/qemu can for instance be configured for FF-A with SPMC at S-EL1. + +Hafnium is the reference SPMC at S-EL2, for more information see [Hafnium in the secure world at Trusted Firmware](https://trustedfirmware-a.readthedocs.io/en/latest/components/secure-partition-manager.html#hafnium-in-the-secure-world). + +![Example of configuration with SPMC at S-EL2](/linaro-website/images/blog/example-configuration-with-spmc-at-s-el2) + +# Future directions + +While the basic support is in place for FF-A 1.0, there are still a few things left to do, either to catch up with FF-A 1.1 or to broaden the 1.0 support. The following areas have been identified: + +* SPMC at EL3 +* FF-A support in Xen mediator +* OP-TEE as S-EL1 SPMC for S-EL0 Secure Partitions +* FF-A version 1.1 + + * Secure and non-secure interrupt handling + * Asynchronous notifications + +SPMC at EL3 and OP-TEE and S-EL1 SPMC are two features to help deploy FF-A on hardware before Arm v8.4. FF-A support will be needed in Xen eventually, starting with just what is needed by OP-TEE is a step in that direction. + +The areas above are tied closely to FF-A. There are other areas where OP-TEE will be extended, but those are not centred around FF-A. + +# Secure Partition Manager Core (SPMC) at EL3 + +The SPMC is integrated with the SPMD (Secure Partition Manager Dispatcher) in the firmware when located at EL3. + +In configurations without S-EL2, typically before Arm v8.4, S-EL1 and EL3 share the secure physical address space. S-EL1 may try to avoid mapping the physical memory used in EL3 but the CPU architecture cannot enforce this. 
+ +This configuration enables running SPs (Secure Partitions) designed to be used with an SPMC at S-EL2 in architectures without S-EL2. There are two significant differences in this configuration: the SMC (Secure Monitor Call) instruction must be used instead of the HVC (Hypervisor Call) instruction, and all memory addresses are physical instead of IPAs (intermediate physical addresses). The latter should be transparent to the SP in practice. + +This configuration enables using an almost standard S-EL1 SP instead of using a hybrid with an SPMC together with a logical partition when S-EL2 is not available. + +![Example of configuration with SPMC at EL3](/linaro-website/images/blog/example-configuration-with-spmc-at-el3) + +From the point of view of OP-TEE, or any other S-EL1 SP, this configuration and “SPMC at S-EL1” will look very similar. + +There is work ongoing with the [SPMC](https://review.trustedfirmware.org/q/topic:%2522ffa_el3_spmc%2522+\(status:open+OR+status:merged\)) at Trusted Firmware. Based on this we have also made a prototype on QEMU ARMv8-A. This can be tested with: + +``` +repo init -u https://github.com/jenswi-linaro/manifest.git \ + -m qemu_v8.xml -b poc/qemu_v8_el3_spmc +repo sync -j8 +cd build +make toolchains -j8 +make SPMC_AT_EL=3 all -j8 +make run-only +``` + +This prototype will be available until everything is upstream. + +# FF-A support in Xen mediator + +OP-TEE and Xen can already coexist in a configuration without FF-A using the older OP-TEE specific communication protocol. With FF-A there is an opportunity to make a generic implementation in Xen. With that it should in principle be possible to replace OP-TEE at S-EL1 with any other SP if desired. One could also consider the advantage of consolidating the mediators needed for other secure world entities. + +![Example of configuration with Xen Mediator](/linaro-website/images/blog/example-configuration-with-xen-mediator) + +In this example OP-TEE provides one virtual instance for each guest. From a guest’s point of view it looks as though it has OP-TEE to itself, without interference from other guests. The nexus is the part of OP-TEE which does the switching between the different partitions. + +This was prototyped during Q3 2021, which resulted in a few hooks in FF-A for SPs to subscribe to events from the hypervisor. + +The next step is to wait for FF-A 1.1 and then update the prototype with the new events on VM creation and destruction. + +# OP-TEE as S-EL1 SPMC for S-EL0 Secure Partitions + +In a configuration without S-EL2, OP-TEE can act as the SPMC for SPs at S-EL0. + +![Example of configuration with OP-TEE as SPMC for S-EL0 SPs](/linaro-website/images/blog/example-configuration-with-op-tee-as-spcm-for-s-el0-sps) + +Trusted Applications coexist with SPs at S-EL0 in this configuration. This work is progressing steadily with a trusted firmware [roadmap](https://developer.trustedfirmware.org/w/trusted-services/roadmap/). This configuration is useful on architectures before Arm v8.4. + +## FF-A version 1.1 - secure and non-secure interrupt handling + +FF-A 1.1 brings updated guidance on interrupt management. This is expected to have minimal impact on upstream code when updating for 1.1. Work in progress, especially where OP-TEE is acting as SPMC for S-EL0 SPs, may need a bit more. + +## FF-A version 1.1 - asynchronous notifications + +Asynchronous notifications are a new feature in FF-A. Some work has already been done to prepare for this with a corresponding feature in the old OP-TEE specific communication protocol.
+ +For OP-TEE this is used to enable top-half and bottom-half types of drivers. The pattern is as follows. A minimal interrupt routine records that a device requires attention and sends a notification to the normal world driver. The driver then does a yielding call “do bottom half” allowing this part of the driver to synchronise with other threads using mutex or even do RPC. This is also nice for the scheduler as yielding calls are scheduled by normal world while a secure interrupt steals CPU cycles. + +# Stay tuned + +* [Subscribe](https://lists.trustedfirmware.org/mailman3/lists/op-tee.lists.trustedfirmware.org/) to the OP-TEE mailing list op-tee@lists.trustedfirmware.org +* Join the [Linaro OP-TEE Contributions (LOC) monthly meeting](https://www.trustedfirmware.org/meetings/) or check out the project page [Linaro's OP-TEE Contributions - Confluence](https://linaro.atlassian.net/wiki/spaces/LOC/overview) +* Visit the [OP-TEE page at trusted firmware](https://www.trustedfirmware.org/projects/op-tee/). + +Thank you for reading this far. If you have any questions or thoughts feel free to +create an issue at [https://github.com/OP-TEE/optee\_os/issues](https://github.com/OP-TEE/optee_os/issues) or to reach out at the mailing list. You’re also welcome to join the LOC meetings. You can also find out more on this topic by watching [the session](https://resources.linaro.org/en/resource/2aHhsEXr7LVcdwH62LrTQ6) we presented at Linaro’s Core Technologies Tech Day earlier this week. diff --git a/src/content/blogs/open-on-chip-debugger-ocd-at-linaro.mdx b/src/content/blogs/open-on-chip-debugger-ocd-at-linaro.mdx new file mode 100644 index 0000000..827d771 --- /dev/null +++ b/src/content/blogs/open-on-chip-debugger-ocd-at-linaro.mdx @@ -0,0 +1,41 @@ +--- +title: Open On-Chip Debugger (OpenOCD) at Linaro +description: In this article Omair Javaid takes a look at what an Open On-Chip + Debugger (Open-OCD) is and how Linaro contributes to the OpenOCD Project. Read + more here! +date: 2020-09-30T11:34:17.000Z +image: linaro-website/images/blog/electricity-1288717_1920-1- +tags: + - toolchain + - arm +related_projects: + - GNU + - LLVM +author: omair-javaid +related: [] + +--- + +## What is Open on Chip Debugger? + +The [Open On-Chip Debugger (OpenOCD)](http://www.openocd.org/) is an open source software development tool which allows on-chip debugging and programming of applications via JTAG/SWD hardware interface. OpenOCD runs on a host computer along with a debugger like GDB. GDB communicates with OpenOCD over RSP protocol similar to debugging an application running on hardware. + +![OpenOCD-flow diagram](/linaro-website/images/blog/open-ocd-flow-diagram) + +## How does Linaro contribute to the OpenOCD Project? + +Linaro has been actively involved in the OpenOCD project since 2016 where we initially started to help the community with upstreaming of Arm v8 AArch64 support in OpenOCD. More information on where we started with Arm v8 upstreaming work can be found [here](https://collaborate.linaro.org/display/TCWGPUB/OpenOCD+for+AArch64). + +In the last few years Linaro toolchain team has actively participated in development, upstreaming and validation of various Arm architecture specific features in OpenOCD. We have also served as liaison between OpenOCD and GDB/LLDB debugger projects. 
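+
+To make the flow described in the first section concrete, a typical host-side session is sketched below; the configuration files are placeholders and should be swapped for the scripts that match your debug probe and board:
+
+```
+# Start the OpenOCD server using an interface and a target script
+# (both shipped in OpenOCD's scripts/ directory)
+openocd -f interface/jlink.cfg -f target/your_board.cfg
+
+# In another terminal, attach GDB over RSP on OpenOCD's default
+# GDB port (3333) and debug as if the code were running locally
+gdb-multiarch vmlinux -ex "target extended-remote localhost:3333"
+```
+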
+ +Linaro toolchain working group maintains [a wiki area for OpenOCD](https://collaborate.linaro.org/display/TCWGPUB/OpenOCD+@+Linaro) containing how-to documents enabling developers to quickly get started with OpenOCD development. In absence of an active testing infrastructure for OpenOCD we have developed instructions for developers to quickly test and validate OpenOCD on Arm architecture. + +## How do I get involved? + +Following pages provide quick start for validation and testing of OpenOCD on Arm: + +* [Getting started with OpenOCD Development](https://collaborate.linaro.org/display/TCWGPUB/Getting+started+with+OpenOCD+Development) +* [Raspberry Pi Linux kernel debugging with OpenOCD](https://linaro.atlassian.net/wiki/spaces/TCWGPUB/pages/25296346120/Raspberry+Pi+Linux+kernel+debugging+with+OpenOCD) +* [Debug Zephyr app on Nitrogen board with OpenOCD](https://collaborate.linaro.org/display/TCWGPUB/Debug+Zephyr+app+on+Nitrogen+board+with+OpenOCD) + +To find out more about Linaro and the work we do, [contact us here](https://www.linaro.org/contact/). diff --git a/src/content/blogs/openamp-becomes-a-linaro-community-project.mdx b/src/content/blogs/openamp-becomes-a-linaro-community-project.mdx new file mode 100644 index 0000000..6c2ce52 --- /dev/null +++ b/src/content/blogs/openamp-becomes-a-linaro-community-project.mdx @@ -0,0 +1,23 @@ +--- +title: OpenAMP becomes a Linaro Community Project +description: In this article, we talk about OpenAMP Project joining Linaro + Community Projects Division which manages open source community projects. Read + more here. +date: 2019-09-23T00:00:00.000Z +image: linaro-website/images/blog/openampproject-banner-updates +tags: + - arm + - linux-kernel + - open-source +author: linaro +related: [] + +--- + +Earlier today, the OpenAMP Project joined the Linaro Community Projects Division, the division of Linaro managing open source community projects, including Trusted Firmware and devicetree. The OpenAMP project aims to standardize interactions between operating environments in a heterogenous embedded system through open source solutions. + +It is a framework providing the software components needed to enable the development of software applications for asymmetric multi-processing (AMP) systems. This allows operating systems to interact within a broad range of complex homogeneous and heterogeneous architectures, as well as asymmetric multiprocessing applications to leverage parallelism offered by the multicore configuration. + +Membership is open to both Linaro members and non-members. Current members include  Arm, Kalray, Linaro, Mentor, ST Microelectronics, Wind River and Xilinx. + +To find out more about OpenAMP and how to get involved, go to [www.openampproject.org](http://www.openampproject.org). diff --git a/src/content/blogs/openembedded-yocto-project-for-kernel-developers.mdx b/src/content/blogs/openembedded-yocto-project-for-kernel-developers.mdx new file mode 100644 index 0000000..f6b060d --- /dev/null +++ b/src/content/blogs/openembedded-yocto-project-for-kernel-developers.mdx @@ -0,0 +1,231 @@ +--- +title: OpenEmbedded/Yocto Project for Kernel Developers +description: "In this blog we look at how to have a working filesystem image + ready to deploy to a device through OpenEmbedded and Yocto. 
" +date: 2022-09-06T00:34:53.000Z +image: linaro-website/images/blog/Code_Image_Core_tech +tags: + - iot-embedded + - linux-kernel +author: daniel-thompson +related: [] + +--- + +OpenEmbedded and the Yocto Project provide powerful tools that are capable of building fully fledged GNU/Linux distributions complete with OS images and package streams ready to push to your update servers. However OpenEmbedded can also be scaled back and used to build simple custom root file systems suitable for kernel development and testing. In this blog post we will attempt to map the shortest route a kernel programmer could take from the initial git fetch to having a working filesystem image ready to deploy to a device. + +# What’s so special about kernel development? + +In the context of embedded Linux distributions then the most obvious, and most unique, thing about kernel developers is that they normally bring their own kernel; they do not need or want any kernel to be bundled with the filesystem! For that reason this post will not mention anything about how to cross-compile the kernel. We will assume you already have that covered and that you have a kernel in `arch/${ARCH}/boot/Image` ready to go. You just need a userspace to partner with it! + +The other major thing that affects kernel hackers more than other developers is that, at least some of the time, they will spend time working on kernels with missing or incomplete drivers. It is therefore useful to have a test image that can be deployed on minimised kernels with few features enabled. + +In short, to meet a kernel developer’s needs we want to build images such that: + +* The filesystem is available in multiple formats: [cpio](https://en.wikipedia.org/wiki/Cpio) (to allow run-from-RAM systems based on initramfs), ext4 filesystem image and tarball (for flexible and creative deployment) +* It boots on a wide variety of hardware with sane defaults (e.g. serves a getty on the serial ports, brings up eth0 automatically, etc) and with some simple tools for driver testing +* It includes an SSH server for remote shell access and file transfer +* It has the ability to enrich the image with additional custom tools and libraries + +# Quickstart guide + +This step by step guide applies all of the ideas above to generate a root filesystem that meets all of the above for a 64-bit Armv8-A system: + +1. Install all the prerequisite host packages described in the [Development Tasks Manual](https://docs.yoctoproject.org/dev-manual/start.html#setting-up-a-native-linux-host). + +2. Download an appropriate poky branch (at the time of writing, kirkstone is the most suitable branch): + + ``` + git clone git://git.yoctoproject.org/poky -b kirkstone + ``` + +3. Source the environment variables required to build OpenEmbedded systems and, optionally, change the prompt for this session so that you can more easily keep track of which sessions have the environment configured to build/rebuild images. + + ``` + cd poky + . oe-init-build-env + PS1="[bitbake] $PS1" + ``` + + Note: *oe-init-build-env* will change your current working directory and, in order to run bitbake commands, this step will need to be repeated every time you start a new terminal session. + +4. 
Create a new machine configuration file inside the bitbake build directory: + + ``` + mkdir -p conf/machine + cat > conf/machine/v8a-arm64.conf <<'EOF' + #@TYPE: Machine + #@NAME: v8a-arm64 + #@DESCRIPTION: Generic Arm64 machine for generating a basic rootfs + + require conf/machine/include/arm/arch-armv8a.inc + + # Don't build an actual kernel + PREFERRED_PROVIDER_virtual/kernel ?= "linux-dummy" + + # Generate filesystem as initramfs, ext4 and tarball + IMAGE_FSTYPES ?= "cpio.gz cpio.xz ext4.gz tar.xz" + + # List the most common device names for serial ports on Arm systems + # (and use SERIAL_CONSOLES_CHECK to avoid errors for non-existent + # devices) + SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" + SERIAL_CONSOLES += "115200;ttyAMA0" + SERIAL_CONSOLES += "115200;ttyMSM0" + SERIAL_CONSOLES += "115200;hvc0" + SERIAL_CONSOLES_CHECK = "${SERIAL_CONSOLES}" + + # This is just a guess about what features the kernel has drivers + # for. It doesn’t matter it the kernel doesn’t actually implement + # everything here. + MACHINE_FEATURES:append = " alsa bluetooth rtc screen usbhost vfat wifi" + EOF + ``` + + The role of the machine configuration file is to tell OpenEmbedded what target device you are building for. This includes things like compiler tuning (this is handled by `arch-armv8a.inc`), choice of kernel, what image formats are best suited for the machine and what features the machine has or could have. The machine configuration is mostly one-off, unless you need to add support for additional names for the serial port (or for a whole different architecture) then you won’t need to edit this much after you have created it. + +5. Now we need to modify `local.conf` to adopt the above machine configuration. We will also use this to make personal (“local”) configuration tweaks to enrich the example images with additional features and packages: + + ``` + cp conf/local.conf conf/local.conf.bak + cat - conf/local.conf.bak > conf/local.conf <<'EOF' + # Choose the simple 64-bit Armv8-A machine we just created + MACHINE ?= "v8a-arm64" + + # Permit root login without a password + EXTRA_IMAGE_FEATURES += "debug-tweaks" + + # Enable SSH access + EXTRA_IMAGE_FEATURES += "ssh-server-dropbear" + + # Adding the init-ifupdown package ensures that eth0, if it exists, + # will be brought up at boot time using DHCP. + CORE_IMAGE_EXTRA_INSTALL += "init-ifupdown" + + # vim rocks... so lets add that to the package list too + CORE_IMAGE_EXTRA_INSTALL += "vim-tiny" + + # Conserve disk space during the build by remove working + # directories as we go + INHERIT += "rm_work" + + # + # Original contents of local.conf files + # + + EOF + ``` + +6. Done! All that is left is to kick off the build: + + ``` + bitbake core-image-base + ``` + +This initial run will take a long time to complete as the build system downloads and builds the required components. Even on relatively powerful machines with fast network connections then initial build times of an hour or more would not be unexpected. + +Once completed the resulting images can be found in `$BUILDDIR/tmp/deploy/images/v8a-arm64`. The root filesystem will have been prepared as an initramfs (use cpio.gz or cpio.xz files depending on which decompressors you have enabled in your kernel), a compressed ext4 filesystem and as a tarball. + +# Testing using qemu + +Qemu is usually an attractive tool to run a quick sanity test on your new userspace. 
Qemu is usually well supported by defconfig kernels, meaning that if you already have a kernel compiled and ready for testing, we are one command away from booting the new userspace: + +``` +qemu-system-aarch64 -nographic \ + -cpu cortex-a72 -M virt -smp 4 -m 2048 \ + -nic user,model=virtio \ + -kernel arch/arm64/boot/Image \ + -initrd $BUILDDIR/tmp/deploy/images/v8a-arm64/core-image-base-v8a-arm64.cpio.xz +[ 0.000000] Booting Linux on physical CPU 0x0000000000 [0x410fd083] +[ 0.000000] Linux version 6.0.0-rc1 (drt@maple) (gcc (Debian 12.1.0-8) 12.1.0, GNU ld (GNU Binutils for Debian) 2.38.90.20220713) #3 SMP PREEMPT Thu Aug 18 13:58:25 BST 2022 +… +[ 8.955913] Freeing unused kernel memory: 7168K +[ 8.976578] Run /init as init process +INIT: version 3.01 booting +Framebuffer /dev/fb0 not detected +Boot splashscreen disabled +Starting udev +[ 9.109321] udevd[146]: starting version 3.2.10 +[ 9.114671] udevd[147]: starting eudev-3.2.10 + +Poky (Yocto Project Reference Distro) 4.0 v8a-arm64 ttyAMA0 + +v8a-arm64 login: +``` + +## Aside: Injecting modules into initramfs images + +Kernel developers who are working with modular kernels and real filesystems (like ext4) can easily loop mount the filesystem and install modules directly into the filesystem. However it can be more of a challenge to inject modules into an initramfs image. Nevertheless with a little care this can be automated by getting kbuild to install the modules into a temporary directory: + +``` +# Cleanup (if needed) and then install the modules into a directory +rm -rf mod-rootfs +make modules_install INSTALL_MOD_PATH=$PWD/mod-rootfs INSTALL_MOD_STRIP=1 + +# Compress the module overlay as a separate cpio archive and append +# it to the rootfs image. The kernel will automatically unpack the +# concatenated archives into the initial ramfs. Note that the +# -Ccrc32 flag is required on recent distros to ensure the xz +# checksums are compatible with the kernel decompressor. +(cd mod-rootfs; find . | cpio --create -H newc --quiet | xz -cvT0 -Ccrc32) \ + | cat $BUILDDIR/tmp/deploy/images/v8a-arm64/core-image-base-v8a-arm64.cpio.xz - \ + > rootfs.cpio.xz +``` + +After using the above commands the combined archive can be found at `rootfs.cpio.xz` ready to be deployed to the target. + +# What about other architectures? + +In the quickstart guide above we used arm64 as an example but the concepts and principles apply equally to other architectures. For example we can easily adapt the above instructions to generate a machine description suitable for 32-bit Arm platforms. + +``` +cat > conf/machine/v7a-arm32.conf <<'EOF' +#@TYPE: Machine +#@NAME: v7a-arm32 +#@DESCRIPTION: Generic Armv7-A machine (with NEON) for generating a basic rootfs + +# Tuning for all of Armv7-A is slightly tricky (because there are +# multiple FPU configurations). This tuning is right for most (but +# not all of) the Cortex-A family.
+DEFAULTTUNE ?= "armv7athf-neon" +require conf/machine/include/arm/arch-armv7a.inc + +# Don't build an actual kernel +PREFERRED_PROVIDER_virtual/kernel ?= "linux-dummy" + +# Generate filesystem as initramfs, ext4 and tarball +IMAGE_FSTYPES ?= "cpio.gz cpio.xz ext4.gz tar.xz" + +# List the most common device names for serial ports on Arm systems +# (and use SERIAL_CONSOLES_CHECK to avoid errors for non-existent +# devices) +SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" +SERIAL_CONSOLES += "115200;ttyAMA0" +SERIAL_CONSOLES += "115200;ttyMSM0" +SERIAL_CONSOLES += "115200;hvc0" +SERIAL_CONSOLES_CHECK = "${SERIAL_CONSOLES}" + +# This is just a guess about what features the kernel has drivers +# for. It doesn’t matter it the kernel doesn’t actually implement +# everything here. +MACHINE_FEATURES:append = " alsa bluetooth rtc screen usbhost vfat wifi" +EOF +``` + +Adding machine configurations that support other architectures is left as an exercise for the reader (hint: take a look at `$BUILDDIR/../meta/conf/machine/include`). Note also that users of x86 systems do not need to add any machine configuration at all since the Yocto Project BSP provides a genericx86-64 machine so simply setting `MACHINE = "genericx86-64"` in `local.conf` is enough to generate x86-64 images! + +# Coming soon + +This post is part one of a two-part series. + +So far you have learned how to build a simple, compact and useful userspace which weighs in at about 12MB (compressed). The comments we added to `local.conf` should give you some clues about how to extend the image with additional features. For example adding the following to `local.conf` does exactly what you (hopefully) think it does: + +``` +CORE_IMAGE_EXTRA_INSTALL += "alsa-tools socat stress-ng rt-tests" +``` + +However even with the extensive commenting there are plenty of customization tricks I haven’t shared yet. Look out for part two where we’ll cover other ways to tweak the local configuration to make things either more compact or more feature rich. + +*** + +*Daniel Thompson is the technical lead for the Support and Solutions team at Linaro and, among other things, leads the [Linaro training activities](https://www.linaro.org/services/hands-on-training/) for both our members and our services customers. He is an experienced kernel developer with a long interest in cross-compiled GNU/Linux distributions. He frequently delivers both our OpenEmbedded/Yocto training and our intermediate and advanced courses on Linux kernel development and debugging.* diff --git a/src/content/blogs/optimizing-tensorflow-convolution-performance-on-aarch64.mdx b/src/content/blogs/optimizing-tensorflow-convolution-performance-on-aarch64.mdx new file mode 100644 index 0000000..a2cce50 --- /dev/null +++ b/src/content/blogs/optimizing-tensorflow-convolution-performance-on-aarch64.mdx @@ -0,0 +1,64 @@ +--- +title: Optimizing TensorFlow Convolution Performance on Aarch64 +description: In this blog, Linaro engineer Everton Constantino talks about how + Linaro improved TensorFlow convolution performance on Aarch64. +date: 2021-03-11T10:19:36.000Z +image: linaro-website/images/blog/machine_intelligence_cover +tags: + - windows-on-arm + - ai-ml + - arm +related_projects: + - AI +author: everton-constantino +related: [] + +--- + +# Introduction + +Google’s [Tensorflow](https://github.com/tensorflow/tensorflow) is the industry standard for AI/ML and has been around since 2015. 
From cellphones to industrial applications, it has a ubiquitous presence wherever machine learning, especially deep neural networks, is required. As Linaro is always investing in the growth of Arm’s ecosystem, a great amount of time is being spent producing a better optimized and higher performance version of Tensorflow. Here we discuss how this was achieved by improving general matrix multiplication times in Eigen. Using our experimental branch, Tensorflow is able to achieve up to 7% better performance on one of the [MLPerf](https://mlcommons.org/en/) benchmarks and up to 15% on basic matrix multiplication benchmarks. + +Tensorflow’s name comes from the mathematical objects called tensors. They are algebraic objects that define multi-linear transformations between vector spaces. As fancy as that sounds, vectors and matrices can be seen as rank 1 and 2 tensors respectively. Images are an example of tensors: an image of width W, height H and 3 channels of colour (R, G, B) is a rank-3 tensor (WxHx3). Usually, neural networks with images as inputs are of a particular type called CNN (convolutional neural network). + +## Convolution in Tensorflow + +In Tensorflow the convolution is done via a sequence of reshape and contract operations which boils down to usual matrix multiplication. In order for this transformation to give appropriate results, a well known conversion step known as im2col is applied which projects higher-rank tensors into matrices. The classical im2col algorithm requires a considerable amount of memory because of its redundant nature; the diagram below outlines the general procedure. + +![im2col algorithm](/linaro-website/images/blog/im2col-algorithym) + +Tensorflow uses a hybrid alternative with virtual tensors. Once the projection is done, general matrix multiplication can be used to calculate the convolution. This is usually a very fast alternative to directly calculating the convolution because gemm libraries are highly optimized. + +This approach moves the core of the computation outside of Tensorflow directly into [Eigen](https://www.google.com/url?q=https://eigen.tuxfamily.org/index.php?title%3DMain_Page\&sa=D\&source=editors\&ust=1615459359355000\&usg=AOvVaw3DidHfVjbmXgD0liFjP1tf), the linear algebra library used to perform the matrix multiplication. + +## Eigen’s matrix multiplication + +Eigen is an easy to use, open source, highly-templated C++ linear algebra library. The current matrix multiplication algorithm closely follows [Goto’s seminal paper](https://www.cs.utexas.edu/users/flame/pubs/GotoTOMS_final.pdf) (Goto). The idea is to break both matrices to be multiplied into a set of blocks and panels to optimize cache performance. The paper outlines just how to perform the blocking procedure. However, the packing of blocks for the gemm kernel is not completely described since that is highly architecture dependent. Here is where our work started. The current implementation was optimized some years ago, taking Intel’s Haswell CPU into account, and it fails to meet peak performance on current Arm processors. + +Several papers already discuss how the memory layout of data can affect load/store times. Although it is widely believed that random access memory latency is independent of the location of data, this is a misconception.
As on hard drives, [RAM also benefits from sequential access](https://developers.redhat.com/blog/2019/04/02/how-data-layout-affects-memory-performance/), and not only that, but usual cache policies are better behaved if sequential access is used. + +The first step of the process was to enhance sequential access in the kernel. For that we needed to redesign Eigen’s packing. We had another reason in mind though: as Linaro looks to the future, we know that Arm is moving towards a new set of matrix multiplication instructions. We therefore needed to give room to other operations inside the kernel beyond the inner product and for that, the packing layout needed to change. + +Once in control of the packing, the next step was to look into NEON’s SIMD instructions. For that we rewrote the micro-kernels, enhancing maintainability and flexibility. This gave us the opportunity to explore the pipeline better, now having an approach that resembles [OpenBLAS](https://github.com/xianyi/OpenBLAS) with 4x4, 8x4, 12x4 and 16x4 micro-kernels. + +Careful use of perf along with a deep knowledge of Arm’s CPU architecture resulted in a version better suited to both cache and pipeline exploitation. Of course, our new approach opens doors to further optimization; as mentioned earlier, our branch is only experimental in nature at this time. + +## Results + +The machine used to test was a Cavium ThunderX1 with 16GB of RAM. The matrix multiplication performance measurements were done via Google Benchmark on both float32 square and rectangular matrices ranging from 8x8x8 to 4096x4096x4096. Matrix multiplication alone, depending on the shape of both matrices, saw an improvement ranging from 5% to 15%. Tensorflow numbers were extracted from a subset of the MLPerf benchmarks and saw up to 7% improvement. + +The experiment consisted of a classification task taken from resnet50. We executed each test 1024 times for Tensorflow taken from master at 02/02/2021, compiled first with the current Eigen archive and then with the same source code using our updated GEMM kernel. Average execution time per run for master was 0.37 seconds with a standard deviation of 0.005, against 0.34 seconds for our version with a standard deviation of 0.005 as well. The total execution time for master was 377.38 seconds and on our version 353.96 seconds. Basic statistical tests reject the null hypothesis. + +![MLPerf Convolution benchmark](/linaro-website/images/blog/mlperf-convolution-benchmark) + +### Conclusion and future work + +Exploring a CPU’s top performance is never an easy task, requiring lots of knowledge and research. Matrix multiplication is one of the only examples of an algorithm that can really stress the CPU, reaching more than 90% usage, so knowing every detail from pipelining to the memory model and beyond is essential. Linaro showed it has done just that. + +This work also opens the door to exploring new instructions, like matrix multiplication specific instructions, and plants the seed in Eigen for supporting mixed precision. The lack of support for mixed precision requires Tensorflow to resort to other libraries since quantization of neural networks is more and more important. + +#### Bibliography + +Goto, Kazushige. “Anatomy of High Performance Matrix Multiplication.” ACM Transactions on Mathematical Software, https://www.cs.utexas.edu/users/flame/pubs/GotoTOMS\_final.pdf. + +[Click here](https://www.linaro.org/contact/) to contact us to find out more about Linaro.
diff --git a/src/content/blogs/porting-common-linux-tools-into-morello-architecture.mdx b/src/content/blogs/porting-common-linux-tools-into-morello-architecture.mdx new file mode 100644 index 0000000..9a11822 --- /dev/null +++ b/src/content/blogs/porting-common-linux-tools-into-morello-architecture.mdx @@ -0,0 +1,54 @@ +--- +title: Porting Linux tools into Morello Architecture +description: In this blog, Linaro interns talk about the work they did to help + identify how easy (or difficult) Morello is to get started with. Read more + here. +date: 2021-06-23T08:18:44.000Z +image: linaro-website/images/blog/Chip_background_UNDER_2MB +tags: + - arm + - security + - u-boot +author: linaro +related: [] + +--- + +## Introduction + +[Morello](https://www.arm.com/architecture/cpu/morello) is a research program developed by Arm in collaboration with the University of Cambridge. Its aim is to develop and use a Capability Hardware Enhanced RISC Instructions (CHERI) architecture. Morello makes use of non-software-forgeable 128(+1)-bit Capabilities, which should be able to limit how references are used. This has the potential to improve memory safety, which should, in turn, make future systems more secure. Capabilities can also help with software compartmentalization, ensuring that the effects of an attack are less severe. In order to test the architecture, there is currently a Morello compatible Android build. + +Since Morello is a research project, it has many loose ends and open questions. A key element for new software projects is that they’re somewhat easy to get started with and work with. The purpose of the work undertaken by Linaro interns Lorenzo Carletti and Camilla Memola (overseen by Joakim Bech, Distinguished Engineer at Linaro) was to understand where Morello falls on the scale. Is it really complicated and cumbersome to work with or is it something that is fairly easy to get started with? By working on the Morello project, the Linaro interns were able to report back to the people running the project as to what work was needed to simplify getting started. + +## Porting cURL to Morello + +By Lorenzo Carletti + +Morello's Android build can run programs in 64-bit and 32-bit modes, but software compiled to run like that does not make full use of CHERI Capabilities. To ensure that the Capabilities are used, one has to compile software with the capabilities enabled. + +The chosen target was [cURL](https://curl.se/), a command-line tool for getting or sending data, including files, using URL syntax. The porting process to CHERI Capabilities was pretty smooth, however it did highlight some issues with the current environment. BoringSSL, which is Android's SSL library, had alignment issues, both at compile-time and at runtime. The lack of gdb support and other debugging tools made it harder than it needed to be to understand what was wrong with it, as cURL would just crash with a SIGBUS as soon as it was started, and other traditional debugging methods (like using printf) wouldn't work. Other than that, it was a success. 
One can find the curl port's Merge Request here: + +[https://git.morello-project.org/morello/android/platform/external/curl/-/merge\_requests/1](https://git.morello-project.org/morello/android/platform/external/curl/-/merge_requests/1) + +## Porting wget to Morello + +By Camilla Memola + +In order to use the full potential of CHERI Capabilities, any tool has to be compiled with +capabilities enabled using specific flags. The goal of this contribution was to port tools that are used on a daily basis in Linux to Morello with CHERI Capabilities. + +The ported tool was [wget](https://www.gnu.org/software/wget/), a command-line tool for retrieving files using HTTP, HTTPS, FTP and FTPS, the most widely used Internet protocols. The porting process to CHERI Capabilities was executed without a lot of issues, but a few problems occurred that required adjustments to the source code in order to port it to the Morello architecture with capabilities enabled. + +## Conclusion + +With some initial guidance it wasn’t too difficult to port applications to run as pure capability binaries if you are somewhat used to working with an Android/AOSP build environment. + +We set no rules about the tools and applications to port, but we ended up with two that are used a lot on a daily basis, namely curl and wget. Officially more than [250 companies](https://curl.se/docs/companies.html) are using curl in one way or another. So, it made a lot of sense to see what it’d take to port curl to run as a pure capability application in Morello. Wget is a similar tool, probably not as popular as curl, but wget is something that lots of people are using in shell-scripts etc, so it was definitely a good choice. + +## How to get involved with the Morello project + +To begin with, external contributors had to sign a contributor's agreement to get involved with the project. This is no longer required, making it easier for future developers interested in the project to get involved. To see what is currently being worked on or to submit merge requests, click on the Morello Project’s Gitlab link here: [https://git.morello-project.org/morello](https://git.morello-project.org/morello) + +For more information on Linaro and the work we do, [contact us here](https://www.linaro.org/contact/). diff --git a/src/content/blogs/porting-linux-to-aarch64-laptops.mdx b/src/content/blogs/porting-linux-to-aarch64-laptops.mdx new file mode 100644 index 0000000..c397deb --- /dev/null +++ b/src/content/blogs/porting-linux-to-aarch64-laptops.mdx @@ -0,0 +1,40 @@ +--- +title: Porting Linux to AArch64 Laptops +description: In this article, Linaro Engineer Lee Jones talks about the work he + has done in porting Linux to AArch64 laptops. Read about his findings here! +date: 2019-09-26T00:00:00.000Z +image: linaro-website/images/blog/porting-linux-featured-image +tags: + - arm + - linux-kernel + - open-source +author: lee-jones +related: [] + +--- + +As the AArch64 Laptops collaboration between Linaro and Arm is wrapping up, we felt it would be helpful to summarise the project and take a quick victory lap. + +Since late last year, on and off, we have been trying to find specification compliant methods of booting Linux distributions on a series of laptops based on various Qualcomm Snapdragon System-on-Chips (SoCs). These laptops left the factory running a bespoke version of Windows 10 called Windows on Snapdragon.
Our brief was to try and "fix" them to run mainline Linux as well. + +The beginning of the project saw us gather together a bunch of laptops based on the Snapdragon 835, which, in no particular (okay, maybe alphabetical) order were: the ASUS NovaGo TP370QL, HP Envy x2 and Lenovo Miix 630. The laptops implement UEFI support so we disabled Secure Boot, inserted a bootable SD card containing Grub for AArch64 and ... nothing. No error message. Not a sausage! Just a black screen. Thankfully, after a long period of head scratching, some bright soul discovered that a bug in the firmware requires the bootable binary, Grub in this case, to be compiled 4k aligned for the firmware to take any notice of it. What we saw (or were not seeing) was UEFI Boot Services rejecting Grub as a viable binary. + +Now in possession of a working bootloader, we were able to take advantage of the upstreaming effort undertaken by the Linaro Qualcomm Landing Team and Qualcomm themselves to boot into a Linux initramfs. This was a real turning point and marked the transition from a skunk-works task into a worthy and notable project in its own right. + +Once we knew it was possible to boot Linux on these devices, the project ramped up and a public repository was created on GitHub called AArch64-Laptops (https://github.com/aarch64-laptops/build). Here we provided a build system for creating pre-built images based on Ubuntu. We also provided documentation describing how to disable Secure Boot on each device, how to build images and how to get them booted via the SD card. After the project had matured, the GitHub project was used to store HOWTOs on the GPU and WiFi stacks and some troubleshooting information for good measure. A community was cultivated surrounding the project. Users submitted bugs & questions and discussed technical topics on the IRC channel (aarch64-laptops @ Freenode) and mailing list (https://lists.linaro.org/mailman3/lists/aarch64-laptops.lists.linaro.org/). + +News of the project spread fast, mainly due to the exposure provided by various popular online technical journals and blogs: Liliputing ([https://liliputing.com/2019/02/now-you-can-run-linux-on-some-arm-laptops-designed-for-windows-10-on-arm.html](https://liliputing.com/2019/02/now-you-can-run-linux-on-some-arm-laptops-designed-for-windows-10-on-arm.html)), Phoronix ([https://www.phoronix.com/scan.php?page=news\_item\&px=Linux-On-The-Win-Arm-Laptops](https://www.phoronix.com/scan.php?page=news_item\&px=Linux-On-The-Win-Arm-Laptops)), Foss Bytes ([https://fossbytes.com/linux-on-windows-10-arm-laptops-project/](https://fossbytes.com/linux-on-windows-10-arm-laptops-project/)) and Tech Republic, to name but a few. It is likely that many of the current owners bought their hardware on the back of the apparent early successes of the project. + +The project subsequently spent quite a bit of time enabling features (graphics, USB, core wireless support, UFS on-board storage and so on), fixing bugs (SD card detect line inversion, touchpad, keyboard, graphics, MMU via efi=novamap, thank you Ard Biesheuvel) and adding documentation whenever necessary. Once we were able to boot into an Ubuntu Desktop, an automatic Docker/Libvirt based (Libvirt virtual machine inside a Docker container) building infrastructure was built to create new images as they became more featureful and more stable.
We utilised the power of PPAs (Personal Package Archives) to keep the user's kernel up-to-date with the latest features and Canonical were kind enough to allow us to distribute test images on Linaro's Release site ([http://releases.linaro.org/aarch64-laptops](http://releases.linaro.org/aarch64-laptops)). + +After a very productive and engaging meeting with representatives from Linaro, Arm and Qualcomm at Connect BKK19, the project began to focus more on the newly released Lenovo Yoga C630, which featured an upgraded Snapdragon 850. Fortunately, Bjorn Andersson from the Linaro Qualcomm Landing Team had been in possession of one of these for a while and was last seen running a bespoke version of Arch Linux. Qualcomm was kind enough to permit Jeff Hugo and Bjorn to contribute towards the project in their spare time. Both were a great help to the project and continue to be active in the community to this day. + +An attempt was made to reach out to some of the more well known companies offering Linux distributions, with varying degrees of success. Dimitri John Ledkov from Canonical was particularly helpful, volunteering his own time and technical prowess to create an intuitive installer, based on our kernel images and bootloader, which allows users to install Ubuntu onto UFS (on-board storage). He has also been actively upstreaming kernel patches which did not make Linux v5.3 (the expected kernel release for Eoan 19.10) into the Ubuntu kernel. + +Some distributions refused point-blank to support these laptops using Device Tree (the primary and most featureful method of booting these devices), whether they were upstream or not. So we investigated other methods of booting off-the-shelf distro installers. Leif Lindholm and I worked on a DTBLoader EFI module which offered up a previously saved, known working DTB in the case where one was not present (i.e. when booting distro installers lacking DTBs). Leif was a great help in this endeavour. The other option available to us was to enable ACPI - a potentially daunting task, with a great many people expecting a booting solution to take many engineering-months of effort and some expecting the task to be impossible without direct assistance from Qualcomm and/or Microsoft. Despite the expected difficulties, from Linux v5.4 it will be possible to boot to an Ubuntu Desktop utilising ACPI alone. It's not as featureful as the DT solution, not by a long shot, but it should help users to at least install a Linux Distro and switch over to booting with DT for subsequent boots. + +One thing worth mentioning before everyone goes out and purchases one of these devices (since they are a great platform to develop ARM-on-ARM and the like): there is still a little niggle with the on-board WiFi. It has been seen working, but it has a habit of restarting the machine, particularly when running a GUI (X and Wayland). This is something Bjorn is currently working on and trying to understand. He's been lobbying for information, but to no avail, yet! Once WiFi is up and running, this will be an outstanding little laptop. A few of us have mentioned switching to it as our daily driver (since it even has a really nice keyboard, which is unusual). + +Apologies if you have contributed to this project and you have not been mentioned. There were so many very helpful people who took time out of their own lives to be a part of this project that it would be difficult to name everyone involved. + +In closing, this project has been a great success.
We've come a long way since the beginning and have developed a very usable platform, whether you are a core developer or more of a web surfer. To have a laptop of this calibre running ARMv8 and Linux on my desk is a delight. Hopefully we can fix the niggles and you will see them popping up on ARM and Linaro engineers' laps sometime soon. diff --git a/src/content/blogs/profiling-python-and-compiled-code-with-linaro-forge-and-a-performance-surprise.mdx b/src/content/blogs/profiling-python-and-compiled-code-with-linaro-forge-and-a-performance-surprise.mdx new file mode 100644 index 0000000..15ffa08 --- /dev/null +++ b/src/content/blogs/profiling-python-and-compiled-code-with-linaro-forge-and-a-performance-surprise.mdx @@ -0,0 +1,43 @@ +--- +title: Profiling Python and compiled code with Linaro Forge – and a performance + surprise +description: In this blog we talk about how to profile Python and compiled code + with Linaro Forge. Read more here! +date: 2018-03-28T02:32:38.000Z +image: linaro-website/images/blog/hpc-bg +tags: + - hpc +author: patrick-wohlschlegel +related: [] + +--- + +If you are developing HPC applications, there is a good chance that you have been in contact with Python these days. Whether you use Python to orchestrate large workflows, to quickly put together small prototypes, to visualize data or even to create actual simulations, you've likely either used or written Python code at some point in your day job. + +Python brings a lot of advantages, such as developer productivity, but it is often described as being slow when it comes to performance. Developers typically assume that most of the execution time is spent in compiled, optimized C/C++ or Fortran libraries (e.g. NumPy) which are called from Python. But is that truly the case? How confident are you that your application is not wasting your precious computing resources for the wrong reasons? + +In Linaro Forge and Linaro Performance Reports 19.0, we have added the Python profiling capabilities you need to hunt down and resolve bottlenecks for your Python codes in the blink of an eye and at scale. Too good to be true? Let's get to it using our profiling tool, Linaro MAP! + +First off, profile your application like you always have, using the following command: + +`map --profile mpirun -n 2 python ./demo.py` + +This command generates the profile information you need. Let's open it up with the command: + +`map ./profile.map` + +![profiling python example 1](/linaro-website/images/blog/profiling-python-example-1) + +If your code spends time in the Python interpreter, the information will be plotted in pink in the graphical user interface. In this particular example, we realize fairly quickly that we are spending the vast majority of the execution time in the Python interpreter. That's not what we expected! Actually, an innocuous multiplication in a loop is taking most of our time! We can do better! + +By simply replacing this line of code with a call to `numpy.multiply()` we manage to replace operations performed by the interpreter with a compiled library call. How does this impact the efficiency of our code? Quickly profiling the new application with Linaro MAP gives the following: + +![profiling python example 2](/linaro-website/images/blog/profiling-python-example-2) + +What a change! We now spend only 1% of the time in the Python interpreter (down from 80.2%) and the small loop runs in a fraction of the time.
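+The script being profiled is not reproduced in the post, but a minimal sketch of the kind of change described above might look like the following (the array sizes and variable names are illustrative assumptions rather than the original demo.py):
+
+```python
+import numpy as np
+
+a = np.random.rand(5_000_000)
+b = np.random.rand(5_000_000)
+result = np.empty_like(a)
+
+# Before: the multiplication runs element by element in the Python
+# interpreter, which is where the profile showed most of the time going.
+for i in range(len(a)):
+    result[i] = a[i] * b[i]
+
+# After: a single call hands the whole operation to NumPy's compiled code,
+# so almost no time is spent in the interpreter.
+result = np.multiply(a, b)
+```
+
+Either version computes the same result; the difference MAP highlights is simply where the work happens - in interpreted bytecode or in a compiled library.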
Within just five minutes, we have been able to run the same code more than 10 times faster (from 41.2 seconds down to 3.6 seconds). + +And this is just one of the problems you can resolve. Better load balancing of large workflows orchestrated by Python frameworks, more intelligent data accesses… The pitfalls Linaro Forge 19.0 can help you avoid are countless. + +As usual, this feature is available on any hardware architecture. If you are interested, simply download the latest Forge and Performance Reports builds and install them on your cluster. Use your existing licence. If you are not yet part of the Forge tools family, do feel free to request a [temporary trial licence](https://www.linaroforge.com/freeTrial/) or give us a shout at [Contact us](https://www.linaroforge.com/contactUs/). The whole team looks forward to hearing from you. + +You can view our Python profiling webinar [here](https://www.youtube.com/watch?v=kJYrE4Yu5WU). diff --git a/src/content/blogs/protected-uefi-variables-with-u-boot.mdx b/src/content/blogs/protected-uefi-variables-with-u-boot.mdx new file mode 100644 index 0000000..fd98297 --- /dev/null +++ b/src/content/blogs/protected-uefi-variables-with-u-boot.mdx @@ -0,0 +1,223 @@ +--- +title: Protected UEFI Variables With U-Boot +description: In this article, Ilias Apalodimas takes a detailed look at + Protected UEFI Variables With U-Boot. Read about his findings here! +date: 2021-01-04T02:16:09.000Z +image: linaro-website/images/blog/tech_background +tags: + - security + - u-boot + - arm +related_projects: + - LKQ +author: ilias-apalodimas +related: [] + +--- + +## **Intro** + +Critical system variables, like the UEFI ones, must be protected against a variety of attacks. On Arm servers and desktops, which typically run EDK2, dedicated flashes are used. Those would normally be accessible from the secure world only, since they are storing critical variables for our system's integrity and security. + +What about smaller embedded systems though? Those don't typically run EDK2 nor do they have special dedicated flashes. Those systems usually use U-Boot. Prior to 2019 U-Boot was using its environment to store EFI variables. Although that was fine for the initial UEFI implementation, it imposed limitations on platforms that wanted to store variables securely and, in the long run, implement UEFI Secure Boot. + +Embedded devices with a dedicated flash in Secure World are rare though (anyone aware of any?). What's becoming more common is eMMC flashes with an RPMB partition. Wouldn't it be nice to store the EFI variables in that? We would then inherit the RPMB Authentication and protection against Replay Attacks and use non-volatile storage we trust more due to its built-in security characteristics. + +## **More problems** + +In the Arm ecosystem and its Trusted Firmware you have, up to now (and prior to Armv8.4), two ways of dispatching payloads to the Secure world. The first one is called ***Secure Partition Manager*** or in short SPM. This is what EDK2 uses, when compiled for Arm, to spawn ***StandAloneMM***, the component used for variable management and storage. + +The second one is called SPD or ***Secure Payload Dispatcher***. This is what OP-TEE is using today. The problem is that those two are mutually exclusive. So you can either store EFI variables securely or run OP-TEE. Small devices with limited hardware have a lot to gain when using a secure OS though.
The first thing that comes in mind is running a FirmwareTPM or a secure client that takes care of the on-boarding process for small IoT devices. + +## **Less code to the rescue** + +We could of course rewrite StandAloneMM as a Trusted Application for OP-TEE. The application is huge though, the final binary for EDK2 is \~2.5MB and quite complex. Wouldn't we be better off with an application that's been working for a couple of years? But can we run it directly in OP-TEE? That way we can get the best of both worlds. Re-use an existing application which will manage our variables securely and maintain the ability to run a Secure OS. + +It turns out that the StandAloneMM binary is self-relocatable, so as long as we manage to jump on the first instruction, everything will just 'work'. We would of course need code in OP-TEE to launch the new partition and in U-Boot to communicate with that partition, but that should be way less, or at least that's what we assumed. + +And less it was! + +[OP-TEE](https://github.com/OP-TEE/optee_os/commit/42471ecf25b7) and [U-Boot](https://github.com/u-boot/u-boot/commit/f042e47e8fb4) already got patches for that and EDK2 patches are currently on upstream review. + +## **Combining it all together** + +So far I've talked about variable management and internal details of Arm's Secure World. We've also managed to run StandAloneMM as part of OP-TEE, but who's responsible for reading and storing the variables eventually? + +StandAloneMM includes the driver that implements the accesses to our hardware. EDK2 calls this ***Firmware Volume Block Protocol*** and it's designed to provide control over block-oriented firmware devices. So the missing link is a StandAloneMM FVB that can re-use OP-TEE and it's ability to access our RPMB partition securely, something like [this.](https://git.linaro.org/people/ilias.apalodimas/edk2-platforms.git/tree/Drivers/OpTeeRpmb/OpTeeRpmbFvb.c?h=ffa_svc_optional_on_upstream) + +If you combine all of the above, the final architecture looks like this: + +![OP-TEE, U-BOOT & Firmware volume Block Protocol architecture diagram](/linaro-website/images/blog/firmware-volume-block-protocol-architecture) + +## Building + +### Building TFA + +``` +mkdir firmware && cd firmware/ +git clone https://github.com/ARM-software/arm-trusted-firmware +pushd arm-trusted-firmware/ +make CROSS_COMPILE=aarch64-linux-gnu- ARCH=aarch64 PLAT= TARGET_BOARD= SPD=opteed +popd +``` + +### **Building EDK2** + +At the time of this article the EDK2 patchset is under review + +``` +git clone https://git.linaro.org/people/ilias.apalodimas/edk2.git -b ffa_svc_optional_on_upstream +git clone https://git.linaro.org/people/ilias.apalodimas/edk2-platforms.git -b ffa_svc_optional_on_upstream +export WORKSPACE=$(pwd) +export PACKAGES_PATH=$WORKSPACE/edk2:$WORKSPACE/edk2-platforms +export ACTIVE_PLATFORM="Platform/StMMRpmb/PlatformStandaloneMm.dsc" +export GCC5_AARCH64_PREFIX=aarch64-linux-gnu- +pushd edk2/ +git submodule init && git submodule update --init --recursive +popd +source edk2/edksetup.sh +make -C edk2/BaseTools +build -p $ACTIVE_PLATFORM -b RELEASE -a AARCH64 -t GCC5 -n $(nproc) +``` + +The StandAloneMM binary is located at *Build/MmStandaloneRpmb/RELEASE\_GCC5/FV/BL32\_AP\_MM.fd* + +### **Building OP-TEE** + +``` +git clone https://github.com/OP-TEE/optee_os.git +cp Build/MmStandaloneRpmb/RELEASE_GCC5/FV/BL32_AP_MM.fd optee_os/ +pushd optee_os/ +export ARCH=arm +CROSS_COMPILE32=arm-linux-gnueabihf- make -j32 CFG_ARM64_core=y PLATFORM= \ + CFG_STMM_PATH=BL32_AP_MM.fd 
CFG_RPMB_FS=y CFG_RPMB_FS_DEV_ID= \ + CFG_CORE_HEAP_SIZE=524288 CFG_CORE_DYN_SHM=y CFG_RPMB_WRITE_KEY=1 \ + CFG_REE_FS=n CFG_CORE_ARM64_PA_BITS=48 CFG_SCTLR_ALIGNMENT_CHECK=n \ +popd +``` + +***CAUTION***: OP-TEE will program the RPMB key (which is one time programmable). If your platform port of OP-TEE doesn't have a way of retrieving a secure key from the hardware you might end up with the default `CFG_RPMB_TESTKEY`. + +### **Building U-Boot** + +You'll need to enable U-Boot's extra configuration options to enable this. So clone U-Boot, apply your board defconfig and enable these options: + +``` +git clone https://github.com/u-boot/u-boot.git +pushd u-boot/ +export CROSS_COMPILE=aarch64-linux-gnu- +export ARCH=arm64 +pushd u-boot +make menuconfig ---> Enable the required options + +CONFIG_OPTEE=y +CONFIG_CMD_OPTEE_RPMB=y +CONFIG_EFI_MM_COMM_TEE=y + +make -j $(nproc) +popd +``` + +**NOTE**: U-Boot currently only supports dynamic shared memory to communicate with OP-TEE. Your board's OP-TEE port should register that memory in **./core/arch/arm/plat-platform/main.c** or define it in your platform DTS file. + +``` +register_ddr(DRAM0_BASE, DRAM0_SIZE); ---> replace with your board specific ranges +``` + +You'll otherwise get an error trying to probe OP-TEE. + +## **Assembling the final image and testing** + +Each board has, unfortunately, it's own way of creating the final firmware image. The build steps for EDK2 and Arm Trusted Firmware should be standard. For assembling the final image containing TF-A, OP-TEE and U-Boot refer to the vendor manual. + +On your first boot, if the RPMB key is not programmed, OP-TEE will do that for you. + +``` +D/TC:?? 00 tee_rpmb_write_and_verify_key:1069 RPMB INIT: Writing Key value: +D/TC:?? 00 tee_rpmb_write_and_verify_key:1070 00000000fc142dc0 xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx +D/TC:?? 
00 tee_rpmb_write_and_verify_key:1070 00000000fc142dd0 xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx +``` + +### **Print variables** + +Notice OP-TEE probing before accessing the variables **OP-TEE: revision 3.11 (e6e7781f** + +``` +=> printenv -e +Found 2 disks +OP-TEE: revision 3.11 (e6e7781f) +SetupMode: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + BS|RT|RO, DataSize = 0x1 +SignatureSupport: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + BS|RT|RO, DataSize = 0x40 +SecureBoot: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + BS|RT|RO, DataSize = 0x1 +certdbv: + d9bee56e-75dc-49d9-b4d7-b534210f637a + 2103-11-19 18:43:00 + BS|RT|AT|RO, DataSize = 0x4 +AuditMode: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + BS|RT|RO, DataSize = 0x1 +DeployedMode: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + BS|RT|RO, DataSize = 0x1 +VendorKeys: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + BS|RT|RO, DataSize = 0x1 +PlatformLangCodes: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + BS|RT|RO, DataSize = 0x6 +OsIndicationsSupported: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + BS|RT|RO, DataSize = 0x8 +CustomMode: + c076ec0c-7028-4399-a072-71ee5c448b9f + NV|BS, DataSize = 0x1 +certdb: + d9bee56e-75dc-49d9-b4d7-b534210f637a + 2103-11-19 18:43:00 + NV|BS|RT|AT|RO, DataSize = 0x4 +VendorKeysNv: + 9073e4e0-60ec-4b6e-9903-4c223c260f3c + 2103-11-19 18:43:00 + NV|BS|AT|RO, DataSize = 0x1 +PlatformLang: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + NV|BS|RT, DataSize = 0x6 +Boot0000: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + NV|BS|RT, DataSize = 0x78 +BootOrder: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + NV|BS|RT, DataSize = 0x2 +=> +``` + +### **Set/Get a variable** + +``` +=> setenv -e -nv -bs -rt test2 test2 +=> printenv -e test2 +test2: + 8be4df61-93ca-11d2-aa0d-00e098032b8c EFI_GLOBAL_VARIABLE_GUID + NV|BS|RT, DataSize = 0x5 +=> +``` + +### **Check available storage** + +``` +=> efidebug query -bs -rt -nv +Max storage size 16284 +Remaining storage size 15188 +Max variable size 8132 +=> +``` + +### Conclusion + +For more information on the work we do to secure embedded devices, read about our projects Trusted Substrate and LEDGE Reference Platform on the [Automotive, IoT & Edge Devices page](https://www.linaro.org/automotive-iot-and-edge-devices/). Alternatively, [contact us here](https://www.linaro.org/contact/). diff --git a/src/content/blogs/protecting-security-critical-firmware.mdx b/src/content/blogs/protecting-security-critical-firmware.mdx new file mode 100644 index 0000000..9ef4419 --- /dev/null +++ b/src/content/blogs/protecting-security-critical-firmware.mdx @@ -0,0 +1,113 @@ +--- +title: Protecting Security Critical Firmware +description: "This blog looks at the work Linaro has done to make it easier for + silicon vendors to enable firmware encryption with minimal platform plumbing. + " +date: 2022-03-03T12:50:35.000Z +image: linaro-website/images/blog/Linaro-and-Riscure-release-banner +tags: + - security + - open-source +author: sumit-garg +related: [] + +--- + +# Introduction + +Security is not a turn key solution but rather made of many different components. There is no such thing as “a secure system”, only secure enough. Some security features offer confidentiality and integrity protection, whilst others are there to make it harder for an attacker to launch an attack. 
Firmware encryption is a security feature that makes it harder for an attacker to reverse engineer the firmware. It can be used to armour several possible assets which can be present in a firmware image. The first asset could be the software IP implemented as part of firmware. The second one could be the secret keys which may be part of the firmware image. And the last one could be the firmware implementation details to make it harder to develop exploits for any vulnerabilities present in the firmware. + +In this blog, we will look back at an abstraction layer we introduced in Trusted Firmware-A (TF-A) and Open Portable Trusted Execution Environment (OP-TEE) to support firmware encryption. This has made it easier for silicon vendors to enable firmware encryption with minimal platform plumbing and reduce downstream firmware maintenance burden. There was [a session](https://www.youtube.com/watch?v=JJjCUSDKb30) around this at Linaro virtual connect 2020 but due to the pandemic we never discussed this feature as widely as we could have. We have decided to correct that in this blog post! + +# Firmware encryption + +Firmware encryption is designed to achieve confidentiality and integrity properties for a particular firmware image. When introducing firmware encryption, a couple of the design goals were clear right from the start. Firstly, we wanted to leverage a symmetric encryption technique since asymmetric encryption can be slow with the device boot time being a major limiting factor. Secondly, we decided to employ authenticated encryption techniques in order to ensure integrity of encrypted firmware blobs. + +![Firmware Encryption](/linaro-website/images/blog/firmware-encryption) + +One major driver for firmware encryption was the emerging robustness requirements for software Digital Rights Management (DRM) implementations. These requirements vary from vendor to vendor but may include requirements for additional barriers to make reverse engineering more difficult. Firmware encryption provides a reference implementation that can be used to address these requirements. + +On Armv8 systems, DRM software generally runs as a Trusted Application (TA) running in a TEE environment, perhaps augmented with some hardware specific drivers in Trusted OS. Hence, to provide firmware encryption, we must add support in TF-A to decrypt Trusted OS payloads (BL32) which, for OP-TEE, consists of the OS itself together with any bundled TAs (including pseudo TAs). Additionally OP-TEE must have support for decrypting TAs that are loaded from the Linux file system. In both cases the pre-existing loaders supported authentication but not decryption. + +# Encryption + Signature? + +The most commonly used cryptographic technique while implementing secure boot is firmware signature. This technique allows only authorized firmware to execute on a particular platform. Firmware signature ensures authentication, integrity, authorization and non-repudiation properties on behalf of the OEM / Service provider. The only security property left out is confidentiality which is ensured by firmware encryption. So there is a need for a combination of signature and encryption techniques to ensure all security properties for a firmware. + +[Prior studies](https://theworld.com/~dtd/sign_encrypt/sign_encrypt7.html) have shown threats to ill-thought-out combinations of signature and encryption techniques as discussed in the next section. 
+ +## Encrypt-then-sign + +![class=medium-inline left Encrypt then sign image](/linaro-website/images/blog/encrypt-then-sign-image) + +Security Properties: + +1. Confidentiality +2. Integrity +3. Authentication +4. Authorization + +Shortcomings: + +* Only encrypted firmware blobs are non-repudiable to OEM / SP. +* Signing an encrypted blob makes it immutable. Also, it doesn’t allow re-encryption on devices, aka firmware binding. + +## Sign-then-encrypt + +![class=medium-inline left Encrypt then sign image](/linaro-website/images/blog/sign-then-encrypt-image) + +Security properties: + +1. Confidentiality +2. Authentication +3. Authorization +4. Non-repudiation + +Shortcomings: + +* Plain encryption doesn’t assure integrity of encrypted blobs. +* Vulnerable to Chosen Ciphertext Attacks (CCAs). + +## Sign-then-encrypt-then-MAC (the one we adopted) + +![class=medium-inline left Sign then encrypt then mac](/linaro-website/images/blog/sign-then-encrypt-then-mac) + +Security properties: + +1. Confidentiality +2. Integrity +3. Authentication +4. Authorization +5. Non-repudiation + +Concerns addressed: + +* MAC tag assures integrity of encrypted blob. +* Allows firmware re-encryption. + +After discussion we opted for the sign-then-encrypt-then-MAC scheme. It gives us assurance of the integrity of the decrypted blob whilst providing us the option to re-encrypt the firmware on the device if needed (for example to replace an ephemeral negotiated key used to transfer it onto the device with a device-specific storage key). + +# Protecting secret key + +Secret key protection may vary from one platform to another depending on the use-case and the underlying hardware capabilities. In order to address this varying requirement, we need to provide an abstraction layer in the TF-A boot-loader (BL2) or OP-TEE TA loader. The loader should provide a weak default API to retrieve the secret key / secret key handle which can be overridden by the platform specific implementation. Note that the secret encryption key is different from the likes of the OP-TEE HUK since it has to be shared with the service provider responsible for encrypting the firmware. + +The other aspect of a secret key is its uniqueness. The secret key can be unique per device or a common shared key across a class of devices. The major benefits of device unique key over class wide key is to limit the attack surface to per device and to defend against software cloning. A problem with device unique keys is that it can make it more difficult to coordinate the delivery of firmware to a fleet of devices, both in manufacturing and in the field. How about leveraging the benefits of both key types? Firmware binding is an optional part of the firmware encryption design that offers this. + +First boot sequence (aka firmware binding) leveraging two key types, Shared Secret Key (SSK) and Binding Secret Symmetric Key (BSSK): + +![Protecting secret key](/linaro-website/images/blog/protecting-secret-key-image-1) + +Subsequent boot sequence leveraging only Binding Secret Symmetric Key (BSSK): + +![Protecting secret key](/linaro-website/images/blog/protecting-secret-key-image-2) + + + +# Upstream status + +Back in November 2019, support for loading encrypted TAs from Linux filesystem made its way to [OP-TEE mainline](https://www.google.com/url?q=https://github.com/OP-TEE/optee_os/pull/3340\&sa=D\&source=docs\&ust=1646317138336730\&usg=AOvVaw2TuaJZxFPTnH6IwNskglt6). 
It allows one to keep secrets, keys or passwords as part of an encrypted TA until they are provisioned into OP-TEE secure storage. Also, it allows the protection of DRM software IP implemented as an encrypted TA. + +Back in March 2020, after extensive security focussed discussions with the TF-A community, experimental support for encrypted Firmware Image Package (FIP) images made its way to the [TF-A mainline](https://trustedfirmware-a.readthedocs.io/en/latest/design/trusted-board-boot.html#authenticated-encryption-framework). Now it is possible to bundle encrypted OP-TEE OS image(s) into FIP. It allows one to keep secrets, keys or passwords as part of the OP-TEE OS binary. Also, it allows the protection of DRM software IP implemented as drivers in OP-TEE OS. + +Overall we have been very pleased with the progress so far. Having a common abstraction layer to support firmware encryption in TF-A and OP-TEE is encouraging and we would certainly welcome any contributions to improve the firmware encryption framework or to add new platform support. + +Our engineers will, of course, engage in upstream focused discussion via the mailing lists. If you need any additional help in bringing these features to your products and platforms then feel free to reach out to [the Linaro Developer Services team](https://www.linaro.org/services/) or, if you are already a Linaro member, to contact [the Linaro support team](https://www.linaro.org/support). diff --git a/src/content/blogs/python-and-go-in-the-arm-world.mdx b/src/content/blogs/python-and-go-in-the-arm-world.mdx new file mode 100644 index 0000000..3d7fafc --- /dev/null +++ b/src/content/blogs/python-and-go-in-the-arm-world.mdx @@ -0,0 +1,63 @@ +--- +title: Python and Go in the Arm World +description: In this article, Siddhesh Poyarekar takes a detailed look at Python + and Go in the Arm World. Read about his findings here! +date: 2019-12-17T03:46:40.000Z +image: linaro-website/images/blog/code_banner +tags: + - arm + - datacenter +author: siddhesh-poyarekar +related: [] + +--- + +Siddhesh Poyarekar is Linaro Developer Services' Toolchain expert, having worked on glibc upstream since 2012 and a number of different projects before that. Recently, Linaro Developer Services completed work which involved studying and fixing packages in the Python and Go ecosystems. In this blog, Siddhesh summarises his findings, including the most common problems encountered on the way. + +Over the past few months we have studied a number of packages in the Python and Go ecosystems to verify that they work on Arm64 servers. Being high level languages, the general assumption is that things should just work and this was true for the majority of packages we looked at. There were a few however that had to be fixed in a variety of ways to make them work correctly on Arm. Over time we realized that there were a few recurring issues across packages. + +The common thread binding packages with issues on aarch64 was that they had some native C/C++ code; a few had embedded assembly. This may have been for a number of reasons, ranging from simply having to interface with a library that is written in C/C++, to tweaking specific sensitive parts of the code to get optimal performance. Interestingly, the ones with embedded assembly often had aarch64 variants already included or at least a sane C fallback. It was the ones with C/C++ code - something that ought to work out of the box - that broke. Here are three of the most interesting and frequent problems we encountered on our journey.
+ +## Floating point comparisons + +This was by far the most common problem we encountered across packages. A number of packages had test cases that compared results of floating point computations with predetermined values and failed if the comparison did not hold. The problem with this kind of testing is that floating point computations can have small differences in results due to minor differences in the computation sequence that may mathematically mean the same thing. + +To their credit, many of the mature packages like scipy and numpy had floating point test cases with tolerances built in, i.e. they only required their results to be accurate within a tolerance threshold, which is the right way to do this kind of testing. Even so, these thresholds need to be adjusted for architectural differences. + +A key architectural difference in floating point computations is the floating point Multiply+Add operation, i.e. an operation of the form M\*A+B. This operation can either be executed on a machine as two instructions, i.e. a multiply followed by an add, or as a single FMA (Fused Multiply Add) instruction. The rounding semantics (and hence the result) of both these variants of operations are different and hence depending on what instruction sequence is selected, the output may vary. The two multiply and add instructions for example, have a rounding step in between that may change the input that goes into the add instruction. The fused multiply-add instruction on the other hand, may not necessarily have that rounding step in between and hence the input into its add step may be different by 0 or more bits compared to the two separate instructions. + +This difference becomes evident when compiling for aarch64 as opposed to x86\_64. The default build options on x86\_64 are conservative so that the resultant binaries execute on all target machines ranging from the Atom to the very latest Intel or AMD offering. The downside of this is that the resultant code always uses the two instruction variant of multiply-add for floating point since the FMA extension is not available on older x86\_64 processors. Compilers for aarch64 on the other hand emit the fmadd instruction by default and as a result, a floating point computation involving multiply and add operations will end up with different results on aarch64 as opposed to x86. + +The easiest way to work around this is to adjust the tolerance of floating point comparisons wherever possible. In cases where adjustment is not possible, users should prefer the fused version of multiply add since it is typically faster and is the way forward for all architectures. This means that on x86, one should move towards building with FMA by default and falling back to old code only if FMA is not present. + +## Converting floats + +A relatively more hazardous and yet surprisingly ubiquitous issue was the conversion of floating point numbers to integers. The ISO C standard defines conversions from floating point to integer types as follows: + +\ *6.3.1.4 Real floating and integer* + +\ *1 When a finite value of real floating type is converted to an integer type other than \_Bool,* + +\ *the fractional part is discarded (i.e., the value is truncated toward zero). If the value of* + +\ *the integral part cannot be represented by the integer type, the behavior is undefined. 50)* + +The first part of the statement does say that the fractional part is discarded but the second part is key.
If the integral part cannot be represented by the integer type, the behaviour is undefined. This means that if a negative floating point number is cast to an unsigned integer, the result is undefined. The same holds when a floating point value cast to an integer type is larger than the largest value the type can accommodate; the result is undefined. + +In both of these cases, the x86\_64 instruction cvtsd2si happens to set the result to a value that approximates the integer value, either by truncating the bit pattern or emulating an overflow. Due to this, a cast of -2.7 to unsigned integer would give -2 on x86\_64. On aarch64 though, the corresponding fcvtzu instruction (or fcvtzs for signed conversion), if the result does not fit into the range of the target, the result is simply set to zero. + +This undefined behaviour is non-trivial to detect and trap and it is recommended that developers audit their code to find such cases. Using the Undefined Behaviour Sanitizer in the libsanitizer library is a useful tool as well; this can be enabled with the -fsanitize=float-cast-overflow instrumentation flag in gcc or llvm. + +This behaviour need not turn up in plain old C/C++ either, even high level statically typed languages such as Go inherit these undefined behaviour semantics. Dynamically typed languages may do the right thing since it is their responsibility to do these conversions; python for example sets results that are consistent across aarch64 and x86\_64 for corner cases. + +## Using Multiple Cores + +The x86\_64 world is dominated by CPUs with few cores, each with high clock speeds. As a result, the incentive to parallelize jobs is not that great. The situation changes greatly when one moves to Arm64 servers. In this world high core counts (32+) are common with each core having relatively low clock speeds for superior power management. Porting a typical python program to this world can breach hard coded assumptions of performance when in reality, the program is just not using all of the resources at its disposal. + +One key improvement area is in builds. Builds managed using makefiles have long been capable of being parallelized and as a result builds of traditional software, such as the Linux kernel or gcc, scale quite well on Arm64 servers. The Python world however is dominated by setup.py build, which is single-threaded. Changing this would require a significant effort in getting components such as Cython to recognize dependencies so that they can build source files in parallel. + +The Go ecosystem doesn’t do much better either since even if projects have Makefiles, they’re typically just wrappers for sequential go build & go test and there’s little in there to ensure that independent targets are built in parallel. + +## Conclusion + +Arm64 servers are gradually making their way into data centres due to their superior power management capabilities and improved performance/watt. Ecosystems around high level languages such as Python and Go make it incredibly easy for developers to build and deploy their applications and with some work, we can make sure that these applications can make the most out of the resources they have, and in a safe manner, on Arm64 servers. We shared some gotchas to look out for in applications which ought to be useful for porting applications to aarch64. These are just some common problems we saw and we’re sure there may be more related to undefined behaviours that x86 characteristics may mask. 
Hopefully this serves as a starting point for developers to embark on their Arm64 server journey. diff --git a/src/content/blogs/qemu-8-2-and-linaro-s-maintainer-story.mdx b/src/content/blogs/qemu-8-2-and-linaro-s-maintainer-story.mdx new file mode 100644 index 0000000..1110ff2 --- /dev/null +++ b/src/content/blogs/qemu-8-2-and-linaro-s-maintainer-story.mdx @@ -0,0 +1,199 @@ +--- +title: " QEMU 8.2 and Linaro’s Maintainer Story" +description: > + We discuss the impact of Linaro’s leadership in QEMU maintenance, the upcoming + 8.2 release and the latest Arm architectural features you can now experiment + with inside QEMU’s system emulation. +date: 2023-12-11T21:12:03.000Z +image: linaro-website/images/blog/Banner_Core_Technologies +tags: + - qemu + - security +author: alex-bennee +related: [] + +--- + +# Introduction + +Without trying to make me out as more of a greybeard than I actually am, I celebrate 10 years at Linaro this year. I joined the team to work on adding support for emulating the recently announced Aarch64 architecture to QEMU. In the years since, the team has been steadily keeping up with the evolution of the architecture and at the last count we’ve added a total of [98 additional extensions](https://qemu.readthedocs.io/en/master/system/arm/emulation.html) to the main A-profile CPU as well as recent additions to the older M-profile class of CPUs. With the latest release and the addition of the [Cortex-A710](https://www.arm.com/products/silicon-ip-cpu/cortex-a/cortex-a710) model developers can now experiment with a next generation Arm v9 processor which sets a new baseline of advanced functionality for a modern processor. + +I’ll talk about some of those new features later in the post but first I want to talk a little about QEMU’s maintainer story. + +# Maintainership Matters + +At Linaro we take our responsibilities to the wider Free, Libre and Open Source (FLOSS) ecosystem very seriously. While open source empowers anybody to download, examine and modify code, maintainers play a key role in keeping projects and communities active and productive. In QEMU maintainers are responsible for the bulk of code review as well as collecting patches for their subsystems and funnelling that code into tested pull requests that are eventually merged into the mainline. It is usually maintainers who are involved in the essential work of modernising and renovating the creakier legacy parts of the code base to keep the project fit for the current generation. + +The year before I joined Linaro we were well represented in the contribution stats placing in 5th place in terms of changesets and lines. However, contributions were dominated by the major enterprise suppliers for whom QEMU is a key part of the virtualisation story. Red Hat has always held the crown in terms of contribution and in my first year was still responsible for 30-40% of the changes going into the codebase. + +### Top changeset contributors by employer (Dec 2013 to Dec 2014) + +
+| Employer | Changesets |
+| --- | --- |
+| Red Hat | 2283 (39.3%) |
+| Linaro | 622 (10.7%) |
+| IBM | 590 (10.1%) |
+| Individual Contributors | 548 (9.4%) |
+| Huawei | 207 (3.6%) |
+| SUSE | 199 (3.4%) |
+
+In the years since I joined we have continued to invest in the project as it has become a key part of delivering for Linaro and its members. About a fifth of the [MAINTAINERS](https://gitlab.com/qemu-project/qemu/-/blob/master/MAINTAINERS?ref_type=heads) entries on the project are now Linaro email addresses covering areas such as the core TCG translator, testing frameworks, debug and introspection code as well as of course the Arm emulation support and a slew of the modelled devices.
+
+[Last year](https://www.linaro.org/blog/next-qemu-development-cycle/) while discussing our development plans for QEMU I shared some stats showing how important a role we play in QEMU development. With the 8.2 release it looks like we have just overtaken Red Hat's excellent record in supporting the project.
+
+### Top changeset contributors by employer (Nov 2022 to Nov 2023)
+
+| Employer | Changesets |
+| --- | --- |
+| Linaro | 2762 (30.6%) |
+| Red Hat | 2565 (28.5%) |
+| Individual Contributors | 554 (6.1%) |
+| IBM | 297 (3.3%) |
+| Ventana Micro Systems | 206 (2.3%) |
+| Academics | 196 (2.2%) |
+ +# What’s new in 8.2 + +While every QEMU release brings [new features to Arm](https://wiki.qemu.org/ChangeLog/8.1#Arm) this release is the culmination of several QEMU releases to bring FEAT\_RME emulation to QEMU. The Realm Management Extension (RME) is a key component of Arm’s Confidential Compute Architecture (CCA). The additional realm state allows guests to run in such a way that even the non-secure host cannot access any area of the Realm unless explicitly shared by the Realm itself. In common with other big features we’ve implemented previously (e.g. [SVE](https://www.linaro.org/blog/sve-in-qemu-linux-user/), [TrustZone](https://www.linaro.org/blog/arm-trustzone-qemu/)) there is more to implement than just the feature itself. When the feature specifies it depends on other features being present we have to backfill those in our support matrix. As FEAT\_RME was only introduced with the Arm v9 architecture, we have to ensure QEMU can now model that baseline. If you want to see the full list of what we had to implement feel free to peruse our [JIRA cards](https://linaro.atlassian.net/browse/QEMU-466). + +![Diagram of the ARM security architecture showing the various security states and exception levels interact.](/linaro-website/images/blog/arm-cca-software-architecture) + +For now we only support software realms (securely interfacing realm guests to real HW involves a lot more work). As the feature is so new we still recommend that those who wish to experiment with it track QEMU’s master branch so they can get the benefit of fixes as soon as they are patched.  + +## Overview of QEMU command-line options for RME + +Because of the restricted support described above, FEAT\_RME is considered experimental in QEMU and thus not enabled by default. + +FEAT\_RME itself is enabled with “-cpu max,x-rme=on”.  In addition, EL3 and EL2 must be enabled for the board model. Per the figure detailing the CCA Software Architecture, the RMM runs in EL2 and the TF-A monitor runs in EL3. + +Thus the basic cpu and machine configuration are done with + +``` +qemu-system-aarch64 \ + -M virt,virtualization=on,secure=on,gic-version=max \ + -cpu max,x-rme=on ... +``` + +## Overview of software stack for QEMU and RME + +Support for QEMU and RMM are still being staged into upstream Trusted Firmware, and support for guest Realms is still being staged into upstream KVM and QEMU.  A complete set of instructions for building the complete software stack from Linaro repositories has been [published separately](https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/Building+an+RME+stack+for+QEMU), and will be kept up-to-date. + +## Example of booting a Realm Guest + +A [set of binaries](https://fileserver.linaro.org/s/Grjs6kSkBYd8DkX) has been created using the above instructions as a demonstration.  Unpack the tar file and execute “rmm-example/run-host.sh”: + +![Picture of QEMU running with tabs for the host and realm consoles as well as the secure and non-secure serial ports.](/linaro-website/images/blog/rmm-example-run-host.sh) + +The firmware and the kernel’s earlycon will log to serial0, the secure monitor will log to serial1, and the host and realm guests consoles will be on HostConsole and RealmConsole respectively. + +Log into the host as “root” with no password, then execute “/mnt/run-guest.sh”.  
The debugging that is enabled within the firmware will immediately begin logging about the realm creation: + +![Picture of QEMU continuing to run, this time with the serial port outputting diagnostics from the Realm Machine Manager (RMM)](/linaro-website/images/blog/mnt-run-guest.sh) + +Within a few minutes, the guest will boot: + +![Picture of QEMU continuing to run, this time with the Realm Console showing the login prompt of the Realm image that has been started.](/linaro-website/images/blog/guest-will-boot) + +Again, one may log in as “root” without password.  The guest filesystem is minimal, so there is really very little to see at this point.  However, it does demonstrate a successful boot to Realm user-space.  Hooray! + +Run “halt” on the guest and then host to shutdown cleanly, or abruptly kill the entire emulation by closing the window or “quit” from the QEMU monitor on stdin.  🙂 + +# Final Thoughts + +The work to enable FEAT\_RME supports a number of aims of the [core QEMU team](https://linaro.atlassian.net/wiki/spaces/QEMU/overview) within Linaro. We upstream useful architectural features so the wider open source community can experiment with them before general availability of hardware. We also provide reference platforms for the rest of Linaro to develop software. You can expect more activity over the next year as our Linaro colleagues work to ensure all layers of the software stack are ready for real silicon when it arrives. As what is going on inside the real hardware will be inscrutable by design I’m sure engineers will also appreciate having the ability to peek inside the inner workings of QEMU when it comes to the inevitable debugging phase of the project. + +As we look to the new year we still have a number of upcoming features to implement for the architecture as well as improvements for using QEMU in early modelling and bring-up tasks. You can see our upcoming roadmap on the [team's project page](https://linaro.atlassian.net/wiki/spaces/QEMU/overview). We’ll see you on the [qemu-devel mailing list](https://lists.gnu.org/archive/html/qemu-devel/)! diff --git a/src/content/blogs/recent-developments-in-the-open-cmsis-pack-project.mdx b/src/content/blogs/recent-developments-in-the-open-cmsis-pack-project.mdx new file mode 100644 index 0000000..3229e74 --- /dev/null +++ b/src/content/blogs/recent-developments-in-the-open-cmsis-pack-project.mdx @@ -0,0 +1,45 @@ +--- +title: Recent developments in the Open-CMSIS-Pack Project +description: > + In June 2021, Arm transferred the CMSIS-Pack technology to Linaro under a new + project named Open-CMSIS-Pack. The project is delivering a standard for + software component packaging and related foundation tools for validation, + distribution, integration, management, and maintenance of microcontroller + software. In this blog we talk about the work that has been achieved so far + and what the project has planned in the future. +date: 2021-12-09T09:21:57.000Z +image: linaro-website/images/blog/96b-nitrogen-relays-resize +tags: + - iot-embedded +author: bill-fletcher +related: [] + +--- + +It’s been six months since [Arm transferred the CMSIS-Pack technology to the Linaro IoT and Embedded Group ](https://www.linaro.org/blog/arm-transfers-cmsis-pack-technology-to-linaro/)under a new project named Open-CMSIS-Pack. The project is delivering a standard for software component packaging and related foundation tools for validation, distribution, integration, management, and maintenance of microcontroller software. 
It aims to create a flexible and easy to use end to end development flow - from project creation to execution of the software on real or virtual hardware - for embedded software. + +## What has the Open-CMSIS-Pack project achieved so far? + +* Project Manager - essentially uses Project Files and CMSIS-Packs to create self-contained CMSIS-Build input files +* Directory organisation - workspaces, pack inventory and root directories +* Taxonomy and multi-context terminology +* Component identifier syntax + +## Open-CMSIS-Pack project milestones for December 2021: + +* Provide an updated version of [CMSIS-Build v0.10.4 incorporating the new ‘cpackget’ utility](https://github.com/Open-CMSIS-Pack/cpackget) for installing missing public packs +* Share a first development snapshot of the ‘Project Manager’ command line tool for review and use case exploration + +## The Open-CMSIS-Pack project's longer-term goals: + +* Evolution of the CMSIS-Pack standard and enable adoption by wider software industry to deploy frameworks, for example for Cloud connectivity or machine learning +* Deliver foundation technology and base tools that can be integrated into toolchains from Open-CMSIS-Pack partners and the wider eco-system +* Improve the inter-operability of various software projects by providing a common way to describe components, interfaces, and other attributes + +CMSIS-Pack technology already provides device support for close to 9,000 different microcontrollers, making project integration of drivers, middleware and other software components across multiple Arm-based devices much easier. [AWS recently made an announcement to deliver FreeRTOS LTS libraries in CMSIS Pack format](https://www.freertos.org/2021/10/freertos-lts-libraries-are-now-part-of-our-partner-toolchains.html). + +STMicroelectronics, NXP Semiconductors and Arm are the founding members of the Open-CMSIS-Pack project which is hosted in the Linaro IoT and Embedded Group. + +Visit [Open-CMSIS-Pack Project](https://www.open-cmsis-pack.org/index.html) and check out the associated repositories to learn more. You can also follow the links to find notes and recordings of our weekly meetings which you are welcome to join. + +For more details please contact contact@linaro.org. diff --git a/src/content/blogs/reducing-code-size-with-llvm-machine-outliner-on-32-bit-arm-targets.mdx b/src/content/blogs/reducing-code-size-with-llvm-machine-outliner-on-32-bit-arm-targets.mdx new file mode 100644 index 0000000..406f067 --- /dev/null +++ b/src/content/blogs/reducing-code-size-with-llvm-machine-outliner-on-32-bit-arm-targets.mdx @@ -0,0 +1,109 @@ +--- +title: Reducing code size with LLVM Machine Outliner on 32-bit Arm targets +description: > + In this article we talk about how the LLVM release will see 32-bit Arm targets + gain full support of the Machine Outliner code size optimization for Arm and + Thumb-2 instruction sets. +date: 2021-04-13T01:26:45.000Z +image: linaro-website/images/blog/code_highway-2- +tags: + - open-source + - arm +related_projects: + - LLVM +author: yvan-roux +related: [] + +--- + +With the upcoming release of LLVM 12.0.0, 32-bit Arm targets have gained the full support of the Machine Outliner code size optimization for Arm and Thumb-2 instruction sets. The expected code size gain provided by this optimization is around 5% on average (you can jump straight to the results part for more details). 
It is not turned on by default (see the How to use it section) but our goal is to have it enabled under -Oz for all Arm cores inside LLVM 13.0.0. + +Function outlining is a compilation process which consists of replacing a chunk of consecutive statements with a call to a new function containing those statements. In a nutshell, it is the inverse of the well known inlining optimization. It is used in different areas of compilation to achieve various goals such as code refactoring or kernel extraction in source to source compilers, shrinking large functions to reduce compile time in JIT compilers, or performance improvement by splitting hot and cold regions of a function and performing partial inlining as presented in \[1]. + +As mentioned above, the Machine Outliner's objective is code size reduction, close to what Identical Code Folding (ICF) at link time is doing \[2]. It is an interprocedural optimization (i.e. not tied to function boundaries) which operates on LLVM machine specific intermediate representation (a.k.a MIR) at the last step in the optimization pipeline, right before code emission (code selection, register allocation, instruction scheduling, etc. are already done). + +Let's look at a simple example: + +![Machine Outliner Example 1](/linaro-website/images/blog/machine-outliner-example-1) + +In the Arm assembly generated for this C code, we can see (on the left side) that the highlighted instructions on lines `<3,4,5>`, `<9,10,11>` and `<15,16,17>` are exactly the same, and thus candidates for being outlined. The Machine Outliner will identify this redundancy, extract the code into a new function, and replace it by calls to this function as can be seen below: + +![Default code generation vs outlined version](/linaro-website/images/blog/default-code-generation-vs-outlined-version) + +## History + +The machine outlining optimization pass was originally developed by Jessica Paquette from Apple in 2016 \[3] and presented at the LLVM developers' meeting \[4]. It was primarily developed for AArch64 (with minimal support for X86\_64 as well) and first available in the LLVM 5.0.0 release. It was later extended for RISC-V targets and included in the LLVM 10.0.0 release in 2019. For 32-bit Arm, we have made an initial version available in LLVM 11.0.0 and we have continued to improve it in order to deliver complete support in LLVM 12.0.0. + +## How does it work + +The algorithm can be divided into three steps: + +### **Identification of candidates** + +This is done by walking through all the basic blocks of the program to find the longest repeated sequences of MIR instructions, which can be reduced to the longest common substring problem \[5] where basic blocks are the strings, and instructions the characters. This class of problems can be solved efficiently with a generalized suffix tree representation. + +In the example below, the two functions calc\_1 and calc\_2 can be represented by strings ABABC and AABC respectively. A generalized suffix tree is built after padding these strings with a unique terminator (# and $). The depth of an internal node of this tree represents the length of a candidate and the number of leaf nodes reachable from it, the number of times it is repeated. Looking for repeated substrings which have a minimum length of two in our example will give us BC which is repeated two times, AB repeated three times and ABC repeated two times.
+
![Generalized suffix tree for strings ababc and aabcc](/linaro-website/images/blog/generalized-suffix-tree-for-strings-ababc-and-aabcc) + +### **Removal of unsafe or unbeneficial cases** + +Now that we have a list of candidates, we have to take care that outlining these pieces of code will not break the program behavior and will actually reduce its size. Indeed not all instructions can be safely extracted. Conditional branches are part of the instructions or sequences which can't be safely outlined, for instance when an operand is an index into a constant pool or jumptable, or if the sequence contains a label which is used to compute an offset in position-independent code (PIC) mode, etc… Thus, such candidates are removed from the list. See below a slightly modified example: + +![Machine Outliner-Removal of unsafe or unbeneficial cases](/linaro-website/images/blog/machine-outliner-removal-of-unsafe-or-unbeneficial-cases) + +We have two candidates on lines `<2,3,4>` and `<10,11,12>` and two more on lines `<6,7>` and `<14,15>` which would, once outlined, give the code below which is broken. Indeed the return instruction outlined on line 14 is predicated and is only executed if r0 is lower than or equal to r1, which means that if that is not the case when OUTLINED\_FUNCTION\_0 is called on line 2, the program will not come back to perform the subtraction on line 3 as it should do, but fall through and execute the multiplication on line 17, which is not the correct behaviour of the program. + +![Machine Outliner-Example 2 of Removal of unsafe or unbeneficial cases](/linaro-website/images/blog/machine-outliner-example-2-of-removal-of-unsafe-or-unbeneficial-cases-) + +Let's continue with our example. Now that unsafe candidates have been removed, we only have two instructions from two call sites outlined into one function. The size of our binary file is 48 bytes (12 instructions of 4 bytes: 5 in calc\_1, 5 in calc\_2 and 2 in OUTLINED\_FUNCTION\_1), which is the same size as the file obtained without outlining, so there is no point in doing it in such cases. To guarantee that the code size is reduced when a candidate is outlined, we need to check that this inequality is true, and remove the candidates otherwise: + +N x Co + Cs + Fo < N x Cs + +Where: +N is the number of candidate occurrences +Cs is the size in bytes of a candidate +Co is the overhead (added instructions) in bytes at the call site +Fo is the overhead (added instructions) in bytes in the outlined function + +### **Function splitting** + +Once we have a safe list of candidates, it remains to actually transform the code, by creating the new functions and replacing each candidate by calls. But given the nature of the instructions which compose a candidate, or their context, it is not always as straightforward as what we have seen in previous examples. + +Let's look at the three cases presented in the table below: + +![Function Splitting Table](/linaro-website/images/blog/function-splitting-table) + +In calc\_1 the outlined region is not a tail-call or a return instruction this time, so one needs to be inserted (line 14) and a Branch with Link (bl) is used to call the outlined function (which will save the return address into the link register lr). It is the same for calc\_2, but it is also necessary to save and restore lr around the call (lines 2 and 4) to preserve the return address used on line 6. This can be done either by using a spare register (like r4 in our case) or by pushing it on the stack if none are available.
The last case adds another constraint: because the region outlined from calc\_3 contains a call to another function (line 15), lr needs to be saved and restored (lines 9 and 17) in order to jump back to the correct address. As this is done on top of the stack, the offsets of the instructions which access it must be changed accordingly (line 12).
+
+## How to use it
+
+The Machine Outliner pass is enabled by default inside the aggressive code size optimization level -Oz for AArch64 and M-profile cores for 32-bit Arm, but it can also be invoked manually or disabled with the -moutline/-mno-outline flags.
+
+![Machine Outliner Pass](/linaro-website/images/blog/machine-outliner-pass)
+
+It is also possible to get information about the transformations made by the pass by using LLVM remarks with the flag -Rpass=machine-outliner. For instance, in our first example it will give:
+
+![Rpass-machine-outliner](/linaro-website/images/blog/rpass-machine-outliner)
+
+## Results
+
+As we have seen, Machine Outlining is always a win for code size optimization: in the worst case your code will not be touched at all, while on average the expected code size reduction on top of the existing aggressive code size optimization level -Oz is \~5% for Arm mode and \~4% for Thumb-2. If we look at a benchmark suite such as SPEC CPU 2017, we see that we obtain the best results on large benchmarks (up to 14% on parest for instance), which is expected since there are better chances of finding repeated sequences of instructions in a large code base than in tiny tuned mathematical libraries. It is also very beneficial when combined with Link Time Optimization (LTO), which operates on the whole program and not per file and already provides some very good results. The Machine Outliner can go further as we can see on blender (-14% in LTO and -23% with the Outliner) or gcc (-8.5% in LTO and -18.7% with the Outliner) for instance.
+
+![SPEC CPU 2K17 Code Size in Arm table](/linaro-website/images/blog/spec-cpu-2k17-code-size-in-arm-mode)
+
+![SPEC CPU 2K17 Normalized code size in Arm table](/linaro-website/images/blog/spec-cpu-2k17-normalized-code-size-in-arm-mode)
+
+## Bibliography
+
+\[1] [https://webdocs.cs.ualberta.ca/\~amaral/papers/ZhaoAmaralSBAC05.pdf](https://webdocs.cs.ualberta.ca/~amaral/papers/ZhaoAmaralSBAC05.pdf)
+
+\[2] [https://storage.googleapis.com/pub-tools-public-publication-data/pdf/36912.pdf](https://storage.googleapis.com/pub-tools-public-publication-data/pdf/36912.pdf)
+
+\[3] [https://lists.llvm.org/pipermail/llvm-dev/2016-August/104170.html](https://lists.llvm.org/pipermail/llvm-dev/2016-August/104170.html)
+
+\[4] [https://www.youtube.com/watch?v=yorld-WSOeU](https://www.youtube.com/watch?v=yorld-WSOeU)
+
+\[5] [https://en.wikipedia.org/wiki/Longest\_common\_substring\_problem](https://en.wikipedia.org/wiki/Longest_common_substring_problem)
+
+For more information on Linaro and the work we do, do not hesitate [to contact us](https://www.linaro.org/contact/).
diff --git a/src/content/blogs/reimagining-linaro-virtual-events.mdx b/src/content/blogs/reimagining-linaro-virtual-events.mdx
new file mode 100644
index 0000000..c1d507a
--- /dev/null
+++ b/src/content/blogs/reimagining-linaro-virtual-events.mdx
@@ -0,0 +1,45 @@
+---
+title: "Reimagining Linaro Virtual Events"
+description: In 2020 we hosted our first online event and in this blog we
+  will explore some of the benefits and challenges of virtual events in the last
+  2 years.
+date: 2021-11-18T16:46:45.000Z +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +strap_image: /assets/images/content/30921188158_953bca1c9f_k.jpg +tags: [] +author: kristine-dill +related: [] + +--- + +In Spring 2020, we planned to host Linaro Connect in Budapest, Hungary but due to the start of the COVID-19 pandemic we had to cancel our in-person event and thus began our journey with virtual events. We hosted our first online event [in March 2020](https://resources.linaro.org/en/tags/2014c600-6140-43ee-aa5f-db7fa650bfd5) and have hosted two Linaro Virtual Connect events per year since then.  + +Our virtual events have re-created some of the elements from Linaro Connect including low level technical sessions and training for developers, important industry updates and announcements, technology demos, and the opportunity to hear from and network with other software and hardware developers and maintainers. Virtual events have some benefits - low cost, ability to reach a wider audience, free attendance, and a way to continue to provide interesting and informative content to the Arm and Open Source community during the pandemic.   + +While there are benefits, virtual events have presented a unique set of challenges. Scheduling speakers from all over the world across multiple time zones, competing with work schedules and time with family and friends, and how to keep attendees engaged and encourage online networking just to name a few. Originally, we tried to recreate the in-person Linaro Connect session model by hosting as many sessions as possible over three days. Now, after two years of virtual events experience we’ve started to re-evaluate this approach. Why do we need to have three tracks over three consecutive days with over seventy sessions covering more than fifteen different topics? It made sense to do this when we had four hundred attendees flying to an event to meet in person, but now that we’re virtual, why not split these days into smaller, more easily digestible and more targeted events? Virtual event fatigue is real, and we have found many people prefer to consume the content in their own time. This tells us that it’s time for a change.  + +We still think virtual events are important and we have valuable content and work to deliver to our audience, so we came up with a new plan to combat virtual event difficulties. Starting in December 2021 and continuing through 2022 we will change our virtual events model to include two types of virtual events: Linaro Connect Tech Days and Linaro Connect Tech Webinars.  + +Linaro Tech Days are one-day single track events consisting of five to eight sessions focusing on a specific theme. Linaro Tech Webinars are a single session or training (one-two hours). All sessions will still be recorded and posted online after the events.  + +Our plans for in-person events in 2022 are still pending while we monitor the COVID-19 pandemic. Some of the best parts about Linaro Connect simply cannot be re-created virtually. Chatting with a speaker after their session, ‘hallway' chats in the corridors, working with other developers face to face in the hacking rooms, having a cup of coffee with a colleague and after hours socializing and dinners. We’ve tried virtual chat rooms, group trivia games, virtual expo halls, etc. and they do not have the same effect as being together in-person.  We will continue to appraise the safety of hosting an event in person and monitor travel restrictions and other factors. 
All announcements will be made to our Linaro Connect Mailing List ([subscribe here](https://linaro.us3.list-manage.com/subscribe?u=14baaae786342d0d405ee59c2\&id=7cf0551a9b)) as well as on the website. We hope to host a true Linaro Connect again with all of you soon.  + +Until then, here is the current plan for virtual Linaro Tech Days and Linaro Tech Webinars. We will be adding to this list over the coming months.  + +Linaro Tech Webinar: LIVE: [“Introduction to Eigen”](https://www.linaro.org/events/introduction-to-eigen/) \ +by Everton Constantino; Linaro Engineer and Eigen Maintainer\ +Thursday 9th December, 2021 at 17:00 UTC\ +Cost: Free + +Linaro Tech Webinar: LIVE ["Kernel Debug Stories"](https://www.linaro.org/events/kernel-debug-stories-for-arm-linaro-connect-tech-webinar/)\ +by Daniel Thompson Linaro Principal Tech Lead \ +-February 8 (English language) \ +-February 15 (Mandarin language) \ +Cost: Free + +Linaro Tech Webinar on [Qualcomm Modem/IPA](https://www.linaro.org/events/ipa-enabling-data-connectivity-on-the-snapdragon-compute-platform-linaro-connect-tech-webinar/): March 2022\ +Linaro Tech Day on Core Technologies: March 2022 \ +Linaro Tech Day on Client Devices: June 2022\ +Linaro Tech Day on Automotive/IoT/Edge: September 2022 + +All details for these events and additional events will be available on our [Linaro Events Page](https://www.linaro.org/events). diff --git a/src/content/blogs/renesas-electronics-and-nxp-semiconductors-n-v-join-the-trusted-firmware-project.mdx b/src/content/blogs/renesas-electronics-and-nxp-semiconductors-n-v-join-the-trusted-firmware-project.mdx new file mode 100644 index 0000000..a48d070 --- /dev/null +++ b/src/content/blogs/renesas-electronics-and-nxp-semiconductors-n-v-join-the-trusted-firmware-project.mdx @@ -0,0 +1,23 @@ +--- +title: 2 New Members Join Trusted Firmware Project +description: Renesas Electronics and NXP Semiconductors N.V. join the Trusted + Firmware Project hosted by Linaro, as new members. Read more here. +date: 2020-03-25T05:34:48.000Z +image: linaro-website/images/blog/shutterstock_723213985-web +tags: + - security + - arm +author: linaro +related: [] + +--- + +Earlier today, Trusted Firmware - the project hosted by Linaro Community Projects Division - announced that Renesas Electronics Corporation and NXP® Semiconductors N.V. have joined as new members. They join existing members Arm, Cypress, Data IO, Futurewei, Google, Linaro, ST Microelectronics and Texas Instruments. You can read the complete press release [here](https://www.trustedfirmware.org/news/Renesas-and-NXP-announcement/). + +The Trusted Firmware Project is designed to reduce porting and integration work across the ecosystem by creating reusable reference implementations for SoC and Trusted OS developers. The project collaborates on the development of Trusted Firmware-A (TF-A), Trusted Firmware-M (TF-M) and OP-TEE. + +This gives SoC developers and OEMs a reference trusted code base complying with the relevant Arm specifications and forms the foundations of a Trusted Execution Environment (TEE) on application processors, or the Secure Processing Environment (SPE) on microcontrollers. The collaborative design, development and validation amongst the project members allows faster and cost-effective deployment of secure devices. + +Membership of the Trusted Firmware Project is separate from Linaro membership and Linaro membership is not required. 
Governance is overseen by a board of member representatives while Linaro provides legal and financial hosting and support.
+
+To find out more about Trusted Firmware and how to get involved, go to [www.trustedfirmware.org](https://www.trustedfirmware.org).
diff --git a/src/content/blogs/securing-a-device-with-trusted-substrate.mdx b/src/content/blogs/securing-a-device-with-trusted-substrate.mdx
new file mode 100644
index 0000000..c65d0fd
--- /dev/null
+++ b/src/content/blogs/securing-a-device-with-trusted-substrate.mdx
@@ -0,0 +1,259 @@
+---
+title: Securing a device with Trusted Substrate
+description: In this article we talk about how to secure your embedded device
+  with Trusted Substrate.
+date: 2022-02-24T12:03:14.000Z
+image: linaro-website/images/blog/Banner_Security
+tags:
+  - u-boot
+  - iot-embedded
+  - security
+author: ilias-apalodimas
+related: []
+
+---
+
+## Introduction
+
+One of the great challenges with deploying devices at the edge is security. On such devices we need to make sure the running software hasn’t been tampered with and we also have to assume that malicious physical access to the device is always a possibility. Is securing edge devices against such threats doable with existing software and hardware?
+
+In this blog we look at how to secure an embedded device with Trusted Substrate - a BIOS that delivers standards-based secure booting and over-the-air (OTA) updates.
+
+## Why do we need to trust a device?
+
+The first thing we need to do is make sure the device software (firmware or OS) can’t be modified without approval. For firmware in the Arm world this can be done by using a ‘Root Of Trust Public Key’ or ROTPK and a Stage 1 Boot Loader (BL1) stored in trusted ROM. Using Arm Trusted Firmware for Cortex-A (TF-A), each bootloader stage verifies the next one before executing it.
+
+* BL1 is responsible for authenticating BL2
+* BL2 is responsible for authenticating all the subsequent BL3x stages, until eventually execution is transferred to BL33
+
+On SystemReady devices BL33 is an EFI bootloader so from that point onward EFI can take care of the loaded kernel authentication. If there’s a need to authenticate the filesystem as well, there are existing tools someone can use (e.g. dm-verity).
+At this point we’ve cryptographically verified everything so it should be hard for an attacker to tamper with our system. There are still a few issues we need to address though.
+
+## Protecting against physical tampering
+
+The device tree that we use, if loaded externally, can’t be trusted. Neither can we trust the initramfs we use to load our OS. Arguably we can work around these problems. For the initramfs we can concatenate it with our kernel and verify it as part of the EFI secure boot process, while for the DTB we can provide it as part of the bootloader which is cryptographically verified by BL2.
+
+The biggest problem remains physical access to devices. An attacker with direct access to the hardware could entirely replace or compromise the flash which holds the secure database used for our EFI keys. In that scenario they can sign and launch a completely different binary and everything will appear to be authenticated.
+
+So how do you stop this from happening?
+
+## Securing your device through Trusted Substrate
+
+U-Boot supports the [EFI TCG2 protocol](https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf).
Its purpose is to define APIs and provide information for things like whether a Trusted Platform Module (TPM) is present, which Platform Configuration Register (PCR) banks are active, obtaining the Trusted Computing Group (TCG) boot log, extending hashes to PCRs, appending events to the TCG boot log and so on.
+
+We did an [introduction to TPMs in a previous article](https://www.linaro.org/blog/how-to-emulate-trusted-platform-module-in-qemu-with-u-boot/) but the most interesting features for us are Platform Configuration Registers (PCRs) and key sealing.
+
+PCRs are large enough to hold cryptographic hashes; they start zeroed out and can only be reset on a system reboot. They can be extended by writing a Secure Hash Algorithm (SHA) hash (typically SHA-1/256/384/512 for TPMv2) into the PCR. The TPM concatenates the new hash to the existing PCR value and another SHA is calculated, which is now stored in the PCR. This gives us the ability to measure different stages of the boot process. As a result, all measurements are interdependent and changing any (measured) component during the boot flow will result in an entirely different PCR value.
+
+If the device has internet access we can perform ‘remote attestation’. In that case the TPM can provide a digital signature of the PCR values along with an EventLog to a remote server.
+The remote server can then inspect the EventLog, replay it and verify its validity against the provided PCRs and compare those against a known-good state of the system configuration stored in a local database to detect tampering.
+
+If internet access is unavailable we can encrypt our filesystem and seal the key safely into the TPM. If any of the measured components changes, the TPM will never release the key which allows you to decrypt your filesystem.
+
+Meta-trustedsubstrate is an OE layer geared towards security. It is built from a variety of open source projects with an ‘upstream first’ mentality and provides a [SystemReady-IR compliant](https://www.arm.com/architecture/systems/systemready-certification-program/ir) firmware with UEFI Secure Boot and Measured Boot enabled by default for all supported hardware. Its aim is to guarantee that your device will run the software it was intended to run, from powering up your device all the way to launching its OS.
+
+## So how do you get started with Trusted Substrate?
+
+Below we provide a step-by-step guide to setting up UEFI Measured boot on Socionext’s DeveloperBox with meta-trustedsubstrate.
+
+A while ago we explained how storing EFI Variables into an RPMB partition of an [eMMC](https://www.linaro.org/blog/protected-uefi-variables-with-u-boot/) works. There are basically three requirements:
+
+* OP-TEE support
+* A working eMMC in U-Boot and Linux
+* Upstream U-Boot support
+
+With an eMMC available we can also use Microsoft's firmware TPM (or [fTPM](https://github.com/microsoft/ms-tpm-20-ref/)) and enable measured boot even if your hardware doesn’t have a discrete TPM.
+There are a lot of firmware components that need to be compiled for this to work.
Just for reference, you will need:
+
+* [SCP-firmware](https://github.com/ARM-software/SCP-firmware)
+* [TF-A](https://github.com/ARM-software/arm-trusted-firmware)
+* [OP-TEE](https://github.com/OP-TEE/optee_os)
+* [U-Boot](https://github.com/u-boot/u-boot)
+* [EDK2](https://github.com/tianocore/edk2)
+* [StandAloneMM from edk2-platforms](https://github.com/tianocore/edk2-platforms)
+* [fTPM](https://github.com/microsoft/ms-tpm-20-ref/)
+
+Normally you would also need complex instructions on how to compile these and assemble the image. Using meta-trustedsubstrate makes the whole process trivial. You can produce a firmware image with:
+
+```
+git clone https://git.codelinaro.org/linaro/dependable-boot/meta-ts.git
+cd meta-ts
+kas build ci/synquacer.yml
+```
+
+### Update the firmware
+
+You can find detailed instructions [here](https://apalos.github.io/Measured%20boot%20on%20DeveloperBox.html#Measured%20boot%20on%20DeveloperBox). The tl;dr version is: flip DSW2-7 to enable the serial flasher, open minicom and use xmodem to send and update the files.
+
+All the files you need will be located at build/tmp/deploy/images/synquacer/
+
+```
+flash write cm3 -> Control-A S -> send scp_romramfw_release.bin
+flash write arm-tf -> Control-A S -> send fip_all_arm_tf_optee.bin
+flash rawwrite 0x500000 0x100000 -> Control-A S -> send optee/tee-pager_v2.bin
+flash rawwrite 0x200000 0x100000 -> Control-A S -> send u-boot.bin
+```
+
+### Install a distro
+
+I am using Fedora in the example here, but given that U-Boot versions since 2021.04 are SystemReady-IR compliant, any COTS distro should work.
+
+```
+sudo dd if=Fedora-Server-netinst-aarch64-35-1.2.iso of=/dev/sdX bs=128M status=progress
+```
+
+Plug your USB stick into a port and start up the board. In U-Boot's console do:
+
+```
+usb reset
+load usb 0 $kernel_addr_r efi/boot/BOOTAA64.EFI && bootefi $kernel_addr_r
+```
+
+Since I am using the box in headless mode (the GPU support has [known issues](https://www.96boards.org/documentation/enterprise/developerbox/support/known-issues.html)), installing via VNC is a nice option to avoid the console nuisance.
+
+Start the installer and enable VNC
+
+![Enabling VNC](/linaro-website/images/blog/enabling-vnc3)
+
+Make sure you encrypt the filesystem using a password as we'll be needing this later on:
+
+![Encrypting filesystem](/linaro-website/images/blog/encrypting-filesystem-v1)
+
+It's worth noting that since U-Boot does not support SetVariable at runtime, you'll get an error while the installer is trying to update the EFI Boot#### variables. This is far from fatal; you can just continue the installation and fix up the boot options later.
+
+![Encrypting filesystem image 2](/linaro-website/images/blog/encrypting-filesystem-v2)
+
+Once the installation completes, you will have three partitions: EFI, boot, and the LUKS encrypted root.
+
+Reboot your board and stop U-Boot at its console.
+
+```
+nvme scan
+efidebug boot add -b 0 Fedora nvme 0 EFI/fedora/shimaa64.efi
+efidebug boot order 0
+bootefi bootmgr
+```
+
+That should set [SHIM](https://github.com/rhboot/shim) as your first boot choice.
+
+### Enabling fTPM
+
+The kernel modules needed for Microsoft's fTPM are included in the Fedora 35 kernel. However, since it relies on OP-TEE to provide the RPMB access, you need to start the OP-TEE supplicant before the module gets inserted.
+
+**Compiling optee\_client**
+
+On the target system, get a copy of optee\_client, compile it and install it.
The default installation will end up in /usr/local/sbin (future Fedora versions will include an optee-client package).
+
+```
+git clone https://github.com/OP-TEE/optee_client.git
+cd optee_client && mkdir build && cd build
+cmake ../ -DRPMB_EMU=0
+make -j$(nproc)
+sudo make install
+```
+
+**Enabling fTPM on systemd**
+
+Create /etc/systemd/system/tee-supplicant.service with the following contents:
+
+```
+[Unit]
+Description=tee supplicant
+
+[Service]
+User=root
+ExecStart=tee-supplicant
+Restart=always
+
+[Install]
+WantedBy=sysinit.target
+```
+
+and enable the service:
+
+```
+sudo systemctl enable tee-supplicant
+```
+
+If you reboot your system now, your firmware TPM should be operational. You can check the event log with:
+
+```
+sudo tpm2_eventlog /sys/kernel/security/tpm0/binary_bios_measurements
+```
+
+### Sealing the key
+
+Remember when you installed an encrypted filesystem? With the TPM up and running you can now automate the decryption of your root filesystem and bind the keys to specific PCRs.
+
+Fedora has clevis, an automated encryption framework, which can make your life easier as far as key sealing and unsealing is concerned. Make sure to install the necessary packages before you start:
+
+```
+sudo dnf install clevis clevis-luks clevis-dracut clevis-udisks2 clevis-systemd
+sudo clevis luks bind -d /dev/nvme0n1p3 tpm2 '{"pcr_ids":"7"}'
+```
+
+The PCR usage is described [here](https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClient_PFP_r1p05_v23_pub.pdf):
+
+* PCR0: SRTM, BIOS, Host Platform Extensions, Embedded Option ROMs and PI Drivers
+* PCR1: Host Platform Configuration
+* PCR2: UEFI driver and application Code
+* PCR3: UEFI driver and application Configuration and Data
+* PCR4: UEFI Boot Manager Code (usually the MBR) and Boot Attempts
+* PCR5: Boot Manager Code Configuration and Data (for use by the Boot Manager Code) and GPT/Partition Table
+* PCR6: Host Platform Manufacturer Specific
+* PCR7: Secure Boot Policy
+* PCR8-15: Defined for use by the Static OS
+* PCR16: Debug
+* PCR23: Application Support
+
+You can use any PCR you think is appropriate for your device security, but the most scalable for us is PCR7, which holds the EFI keys and the UEFI SecureBoot state. Measuring that would render physical attacks against the EFI variable storage useless - changing the keys would yield a different value for PCR7. We can also use the TPM to measure the initramfs and DTB we load onto the device, providing the guarantees we need for those files.
+
+### Adding tee-supplicant to your initramfs
+
+As I mentioned earlier, the fTPM relies on the OP-TEE supplicant for the RPMB accesses. So the missing piece of the puzzle in order to access your TPM, unseal your key and decrypt the filesystem, is to create an initramfs with the needed modules and the tee-supplicant.
+
+Create a /usr/lib/dracut/modules.d/60tee-supplicant/ directory, copy in the /etc/systemd/system/tee-supplicant.service we created earlier, and add a module-setup.sh file with the following contents:
+
+```
+#!/usr/bin/bash
+check() {
+    require_binaries /usr/local/sbin/tee-supplicant || return 1
+    return 0
+}
+
+depends() {
+    return 0
+}
+
+install() {
+    inst /usr/local/sbin/tee-supplicant /sbin/tee-supplicant
+    inst "$moddir/tee-supplicant.service" "$systemdsystemunitdir/tee-supplicant.service"
+    $SYSTEMCTL -q --root "$initdir" add-wants cryptsetup.target tee-supplicant.service
+}
+
+installkernel() {
+    hostonly='' instmods =drivers/char/tpm
+    instmods tee optee
+}
+```
+
+You can now re-create your initramfs with:
+
+```
+sudo dracut --add clevis-pin-tpm2 --add tee-supplicant --force
+```
+
+If everything is set up correctly you should see something along the lines of this on your screen:
+
+```
+Welcome to Fedora Linux 35 (Server Edition) dracut-055-6.fc35 (Initramfs)!
+
+\[ OK ] Reached target Basic System.
+         Starting Cryptography Setu…8ff0-43f6-9484-b4f16ff43093...
+\[ OK ] Started tee supplicant.
+Please enter passphrase for disk Samsung SSD 960 EVO 250GB (luks-5fe9fed9-8ff0-43f6-9484-b4f16ff43093):
+```
+
+But this time you won't have to supply a password!
+
+## Future work
+
+Distros use an intermediate bootloader called SHIM before loading GRUB and eventually your OS. SHIM, which mainly exists to bridge the signing authority gap between commercial firmware implementations (carrying Microsoft keys) and distros, is responsible for verifying GRUB, which in turn is responsible for verifying your OS kernel. This makes sense for large servers and commercial hardware with a large number of hardware combinations.
+
+However, this adds unnecessary complexity and an increased attack surface on small purpose-built devices. In a follow up post we will explore booting the kernel directly and enabling UEFI secure boot using Trusted Substrate and the [LEDGE reference platform](http://releases.linaro.org/components/ledge/rp-0.3/).
+
+To find out more about Trusted Substrate and how to get involved, visit [our project page](https://linaro.atlassian.net/wiki/spaces/TS/overview).
diff --git a/src/content/blogs/security-and-the-zephyr-project.mdx b/src/content/blogs/security-and-the-zephyr-project.mdx
new file mode 100644
index 0000000..78e38ff
--- /dev/null
+++ b/src/content/blogs/security-and-the-zephyr-project.mdx
@@ -0,0 +1,53 @@
+---
+title: Security and the Zephyr Project
+description: In this blog, David Brown explains the process and measures taken
+  to resolve the vulnerabilities of the Zephyr project and to ensure the
+  codebase is secure.
+date: 2020-11-26T04:21:45.000Z
+image: linaro-website/images/blog/cyber-security
+tags:
+  - security
+author: david-brown
+related: []
+
+---
+
+## Security
+
+David Brown is the Security Architect for the Zephyr Project. He is also the security lead for the Linaro IoT and Embedded (LITE) working group. In addition to general security of IoT devices, David is a maintainer for the MCUboot secure bootloader project.
+
+Part of the charter for the Zephyr Project specifies that there shall be a security subcommittee. This committee consists of an individual from each Platinum member company, along with two elected positions, a Security Architect (currently me), and a Chair. The Chair is responsible for running the regular security meetings (every two weeks), and the Architect is responsible for the overall security of the project.
+
+In the past, this security subcommittee, often just called the security team, or sometimes the security working group, has produced a set of documentation describing the security goals and process for the project, as well as developed a preliminary threat model.
+
+A more recent development has been the process of managing discovered vulnerabilities, including the CVE system. External to our project, the CVE database (Common Vulnerabilities and Exposures) assigns a unique identifier to each vulnerability discovered across any participating project. On September 7, 2017, the Zephyr Project became a CNA, or CVE Numbering Authority. By producing certain documentation describing our processes, we gained control over an allocation of CVEs so that we can manage these ourselves.
+
+## Feeling vulnerable
+
+The key idea behind the CVE system is the idea of a vulnerability. One way to think of a vulnerability is that it describes a characteristic of a system (typically a software bug) that can be exploited to cause unexpected or unintended behavior in that system. The consequences of these can vary from mildly annoying to quite devastating, such as allowing remote privilege escalation, and even control over a device. The CVE system provides numerous rules and guidelines as far as how to allocate these numbers, and how to determine priorities.
+
+In May of 2020, the project received a report from the NCC Group outlining several dozen vulnerabilities found in the Zephyr codebase. We began the process of allocating CVEs for these, and improving and documenting the methods we use to track these issues.
+
+Because, somewhat by definition, these vulnerabilities are exploitable, it is important not to disclose this information too early. Ideally, the fixes should be propagated to end devices before information on the vulnerability itself is released. In addition to the regular challenges of doing this, being an open source project makes this more difficult, because development itself is done in the open.
+
+## Tracking issues
+
+We address this in a few ways. First, we don’t use the regular Zephyr bug tracking system to track these vulnerabilities. We have a separate instance of JIRA that has been configured to support issues being embargoed, or hidden from the public, until an embargo date has been reached. Since development of patches continues the same as other changes, on GitHub, this adds a bit more complexity to these fixes. Generally, developers will try to describe what has been fixed in the commit text, and leave out details explaining that the issue is a vulnerability, or how the change addresses it. This can make understanding these patches later more difficult, but the hope is that the rest of the system in place will allow this information to be found after the embargo has been lifted.
+
+When each vulnerability is created, it will be allocated a CVE number. These numbers are allocated from MITRE, and currently assigned to us in blocks. They are working on an API that will allow us to allocate them in a more automated manner, on demand. If an issue is still under embargo when a Zephyr release is made, the release notes will simply contain a reference to the CVE, without a mention of the fix. We have a vulnerabilities page that shows all vulnerabilities found in the project. For CVEs that are still under embargo, there will just be a placeholder on this page.
+
+Our embargo period has undergone a few revisions. Initially, it was fairly short (60 days).
Although this is adequate for a project that produces end devices, it isn’t really enough time to propagate fixes to users of Zephyr that are themselves building devices. Therefore, we have extended this embargo period to 90 days, with a goal of having fixes in place within Zephyr within 30 days, giving 60 days for end users to be able to apply and deploy the fixes into their own devices. + +In order for these product creators to know about the vulnerabilities before the embargo ends, we have created a vulnerability registry where individuals can register for a mailing list to receive alerts. Ideally, this will be restricted to those who are making products using Zephyr. + +After the embargo ends on a particular vulnerability, several things will happen. First, the vulnerabilities page will be updated to include more details about the vulnerability. This information is kept separate from the release notes because the release notes are fixed with each release, and cannot be updated after the release. Second, the CVE database itself will be updated to also include the details of the release. + +There are numerous sites that monitor the CVE database, and updating this information will generally result in reports being generated containing information about the issues, and their fixes. + +## Doing the work + +Per the project charter, this security team consists of an individual from each member. In the past, we set up a rotation to determine who would process vulnerabilities. Unfortunately, this also included additional work monitoring static analysis and became a bit overwhelming. Static analysis is important, but isn’t entirely about security, and needs to be addressed independently of just the security team. We are now beginning a rotation based on just vulnerabilities themselves. + +Security process is something we are continually seeking to improve. As we work through the process, it is important to be aware of ways to improve the process, ultimately with the goal of making Zephyr itself secure, and allowing it to be used to create secure products themselves. + +For more information about Zephyr’s security communication or vulnerability reporting, visit [https://www.zephyrproject.org/security/](https://www.zephyrproject.org/security/). diff --git a/src/content/blogs/setting-up-tensorflow-for-windows-on-arm.mdx b/src/content/blogs/setting-up-tensorflow-for-windows-on-arm.mdx new file mode 100644 index 0000000..48981b7 --- /dev/null +++ b/src/content/blogs/setting-up-tensorflow-for-windows-on-arm.mdx @@ -0,0 +1,90 @@ +--- +title: Setting up Tensorflow for Windows on Arm +description: In this blog, Everton Constantino talks about how to set up + Tensorflow for Windows on Arm. +date: 2022-06-28T09:34:14.000Z +image: linaro-website/images/blog/Banner_AI +tags: + - windows-on-arm + - ai-ml +author: everton-constantino +related: [] + +--- + +# Introduction + +Tensorflow is one of the major machine learning frameworks used today. It is usually distributed via Python packages built using Bazel. At the time of writing, there is no official package available for Tensorflow to run natively on Windows on Arm. However, to help developers use these tools, this blog post shows how you can build your own Tensorflow package using some patches we developed to do this. + +The process starts with setting up the environment, then you build your own Bazel and finally build Tensorflow. We have enabled Bazel for Windows on Arm. The version that the official binary is available in is 5.1. 
+ +Unfortunately it is not possible to build our current Tensorflow branch with this version, which is why you will need to build it from source. All the steps were tested on a Surface Pro with Windows 11. + +# Setup Requirements + +* Microsoft Visual Studio Community Edition 2019. Download at [https://visualstudio.microsoft.com/vs/community/](https://visualstudio.microsoft.com/vs/community/) - please note that this is not the latest release and make sure to install the Windows SDK. +* Bazel 4.2.1 for Windows x86. You will use this to bootstrap the Windows on Arm Bazel version. Download the source from [https://github.com/nsait-linaro/bazel](https://github.com/nsait-linaro/bazel), check out branch 4.2.1-win\_arm64, and then follow the steps on [https://bazel.build/install/compile-source#build-bazel-using-bazel](https://bazel.build/install/compile-source#build-bazel-using-bazel). +* Download a JDK. We used zulu 17.32.13 which you can find at [https://www.azul.com/downloads/?package=jdk](https://www.azul.com/downloads/?package=jdk) and make sure to download the Windows on Arm version. +* Python 3.10.0. This can be downloaded with the nuget tool [https://dist.nuget.org/win-x86-commandline/latest/nuget.exe](https://dist.nuget.org/win-x86-commandline/latest/nuget.exe) and then run the following command: + +``` +nuget.exe install pythonarm64 -Version 3.10.0 +``` + +NB: This only needs to be done for Python 3.10.0 From 3.11.0a5 onwards official binaries for Windows on Arm can be downloaded. Make sure to add this Python to path. + +* LLVM and MSYS2. LLVM for Windows on Arm is being released by Linaro at [https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.0/LLVM-14.0.0-woa64.zip](https://www.google.com/url?q=https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.0/LLVM-14.0.0-woa64.zip\&sa=D\&source=docs\&ust=1656413216028945\&usg=AOvVaw1LwcTzz9MkUsryVAVZ5tgw) and MSYS2 can be found at [https://www.msys2.org/](https://www.msys2.org/), add them to your PATH as well. Then set up a Powershell script with appropriate PATHs and also with the following lines: + +``` +cmd.exe /c "call `"__PATH__\VC\Auxiliary\Build\vcvarsx86_arm64.bat`" && set > %temp%\vcvars.txt" +Get-Content "$env:temp\vcvars.txt" | Foreach-Object { + if ($_ -match "^(.*?)=(.*)$") { + Set-Content "env:\$($matches[1])" $matches[2] + } +} +$env:PATH+=";_PATH_KIT__\arm64\ucrt\;__PATH__\VC\Redist\MSVC\14.29.30133\onecore\debug_nonredist\arm64\Microsoft.VC142.DebugCRT" + +``` + +Where **PATH** is the Visual Studio CE installation path and **PATH\_KIT** the Windows SDK installation path. +Finally, grab our patched Tensorflow from [https://github.com/everton1984/tensorflow](https://github.com/everton1984/tensorflow) and check out the [win\_arm64v2 branch](https://github.com/tensorflow/tensorflow/compare/master...everton1984:win_arm64v2). + +# Building Tensorflow + +First you should make sure you have read the guide to compile Tensorflow from source at [https://www.tensorflow.org/install/source](https://www.tensorflow.org/install/source). Make sure to also install all required Python packages mentioned in it. + +The repository you downloaded Bazel from added support for a new cpu/OS x64\_arm64\_windows and the main change to Tensorflow is to add support for a new target - win\_arm64. 
The patch is experimental and some features have not been tested thoroughly, but the work entailed not only adding the new target but also adapting third-party libraries to actually take advantage of it and to understand that you can now be compiling on Windows while the CPU is not necessarily x86.
+
+To compile just run:
+
+```
+bazel build --config=win_arm64 //tensorflow/tools/pip_package:build_pip_package
+```
+
+Bazel might fail to properly link with python.3.10.0.lib. If that occurs, make sure to copy that file directly into LLVM’s library folder. Once this step is complete, execute the script to build the whl package itself:
+
+```
+./bazel-bin/tensorflow/tools/pip_package/build_pip_package.exe WHL_DIR
+```
+
+This will build the whl in the path pointed to by WHL\_DIR. You will notice that you won’t be able to install that particular whl because of a missing dependency, tensorflow\_io\_gcs\_filesystem, but you can force the installation with the extra pip argument --no-dependencies.
+
+You can then import tensorflow in your Python scripts.
+
+# Limitations
+
+Not all features will work because of the lack of Tensorflow IO for Windows on Arm. Building Tensorflow IO requires an already working version of Tensorflow, so the bootstrapping process is too complex for this article. Note that without Tensorflow IO several important Tensorflow features will fail to execute.
+
+In order to execute part of Tensorflow’s unit tests you must use bazel’s test command. We examined the ones under core/kernels, and currently only lmdb\_dataset\_op\_test seems to fail. There seems to be a bug in either bazel or LLVM that requires the tests to be built without dynamic libraries, so it is necessary to add an additional argument --dynamic\_mode=off.
+
+To execute just the core/kernels tests, type:
+
+```
+bazel test --config=win_arm64 --dynamic_mode=off //tensorflow/core/kernels/...
+```
+
+At present there are no plans for official native support of Tensorflow on Windows on Arm, but that may change if the community pushes strongly for it.
+
+# Conclusion
+
+Here we have shown the process of building Tensorflow for Windows on Arm. This has been part of a continuous joint effort between Linaro and Microsoft to provide a smooth native Arm environment for its Surface devices and Azure cloud instances. Feel free to read more about this at [https://linaro.org/windows-on-arm](https://www.linaro.org/windows-on-arm).
diff --git a/src/content/blogs/standard-temperature-tooling-now-a-reality-in-the-linux-kernel.mdx b/src/content/blogs/standard-temperature-tooling-now-a-reality-in-the-linux-kernel.mdx
new file mode 100644
index 0000000..d20c8f6
--- /dev/null
+++ b/src/content/blogs/standard-temperature-tooling-now-a-reality-in-the-linux-kernel.mdx
@@ -0,0 +1,45 @@
+---
+title: Standard Temperature Tooling in the Linux kernel
+image: linaro-website/images/blog/30921180788_34ce2cd5f8_c
+tags:
+  - linux-kernel
+  - arm
+  - open-source
+date: 2020-02-03T02:35:45.000Z
+author: linus-walleij
+related: []
+
+---
+
+With the Linux 5.6 release, it was announced that there is a proper drive temperature driver for disks and solid-state drives with temperature sensors - something that has been in the works for years. So what does this mean? Why is this significant? And how did Linaro play a role?
+
+In this blog Senior Engineer Linus Walleij talks about Linaro’s involvement and how he would like to see things evolve.
+
+### What happened and how was Linaro involved?
+
+It began in August 2018 when I submitted an RFC patch: https://lore.kernel.org/linux-hwmon/20180809222425.17304-1-linus.walleij@linaro.org/
+Then a v1: https://lore.kernel.org/linux-hwmon/20180824191514.14938-1-linus.walleij@linaro.org/
+
+I suggested doing SMART temperature readouts only for ATA devices, solving my pet peeve: NAS boxen with one single temperature sensor - the embedded sensor in the hard drive.
+
+It was discovered that the userspace tools doing these temperature readouts had never been quite adequate. The temperature has little meaning for the overall longevity of the device, which is what the SMART tools are for.
+
+Reading temperatures and dealing with temperature zones is for the kernel.
+
+SCSI maintainer James Bottomley suggested making the feature more generic so that it could also support SCSI drives: https://lore.kernel.org/linux-hwmon/1536949216.3531.35.camel@HansenPartnership.com/
+
+I iterated on this approach a few times up to a v7 version of the patch set in November 2018:
+https://lore.kernel.org/linux-hwmon/20181118193729.25278-1-linus.walleij@linaro.org/
+
+Because of the complexities involved, I got stuck at that point and the item went to the back of my backlog for a year. Guenther Roeck however liked the idea, reworked it from scratch and returned
+with a patch on December 8 2019: https://lore.kernel.org/linux-hwmon/20191209052119.32072-1-linux@roeck-us.net/
+
+This was quickly iterated on and is now merged into the mainline kernel.
+
+### Why is this significant?
+
+This provides userspace with a unified interface, HWMON sysfs, to discover, monitor and react to all temperature zones in the hardware, including disks and disk stacks. It removes the need for an external tool and relies on standard temperature tooling. What’s more, the SMART tools needed to be run as root; with this more granular policy, users can relax privileges on system temperature monitoring tools. As the kernel now knows about the temperature zone in the disk, the thermal policy engine in the kernel can react to it and monitor disks.
+
+### What does the future hold?
+
+For embedded, the next step for me will be to provide parsing code such that hard drives become attached to device tree nodes. We have generic code in place to create thermal zones from device trees. For storage server systems the feature is usable by system monitoring tools starting with the deployment of kernel v5.6.
diff --git a/src/content/blogs/supporting-multiple-devices-with-the-same-aosp-images.mdx b/src/content/blogs/supporting-multiple-devices-with-the-same-aosp-images.mdx
new file mode 100644
index 0000000..2878234
--- /dev/null
+++ b/src/content/blogs/supporting-multiple-devices-with-the-same-aosp-images.mdx
@@ -0,0 +1,70 @@
+---
+title: Supporting Multiple Devices with the same AOSP Images
+description: In this blog Amit Pundir and John Stultz discuss the benefits of
+  having a framework for a shared vendor image which supports multiple devices.
+date: 2022-03-08T09:23:59.000Z
+image: linaro-website/images/blog/technology-3389917_1920-1-
+tags:
+  - android
+author: amit-pundir
+related: []
+
+---
+
+Co-authored-by: John Stultz
+
+# Introduction
+
+Creating an Android device has always required and allowed for lots of custom per-device logic and features. While this has been a great benefit, allowing vendors to quickly bring new features to market, it has also caused trouble with fragmentation and lagging updates.
+ +Google’s recent Treble and GKI efforts to cleanly separate device specific logic from generic system logic has greatly improved the situation, especially around updates, reducing the amount of work a vendor needs to do in order to update their device to the latest Android release. + +But even these efforts have side effects, as while vendors don’t need to redevelop their device specific changes against every Android update, they do need to manage updating and testing and releasing their vendor-specific HALs, for [each device they support](https://android-developers.googleblog.com/2020/12/treble-plus-one-equals-four.html). + +We think this can be further improved upon. By developing a framework for a shared vendor image to support multiple devices, we think vendors can reduce both development effort and the amount of management needed for updating their devices. In this blog we talk about the work involved in creating this framework and the benefits in doing so. + +# The challenge of building AOSP images for each target device + +At the [last virtual Linaro Connect](https://resources.linaro.org/en/resource/8sjfJfUNX3qitL5MW6Tbfz), we discussed the new Qualcomm Robotics RB5 development board (RB5), and how by using iterative upstream development, the RB5 was able to leverage the device support present for the Qualcomm Robotics RB3 Platform Development Kit (also known as Dragonboard 845c or DB845c) already in AOSP. This made the RB5 the easiest Linaro supported development board yet to be added to AOSP. + +But when both developing and testing with these devices, we still had to build AOSP for each target device. It seemed clear it would be a lot nicer if we could save both time and storage and have a single target image which could be used for testing on both boards. + +# Creating a single target image for testing on multiple boards + +The RB5 shares a lot of common IP blocks with prior gen SoCs like SDM845 (used in DB845c) and SM8150 (Qualcomm’s flagship SoC for 2019). That, along with the iterative upstream efforts, make it well supported already by the upstream Linux kernel, mesa and linux-firmware projects. This upstream support is important, because this allows the kernel to abstract most of the SoC differences away, and allows us to re-use the same upstream focused HALs (drm\_hwcomposer, mesa, etc), minimizing changes needed for the AOSP build device configuration. + +For the very few differences between the devices that are not abstracted away, we added a new service which probes /proc/device-tree/compatible for device details at run time to set a vendor property, which then is used to run device specific services, or set device specific configs: e.g. set correct Alsa mixer path or establishing a unique ethernet MAC address on DB845c etc. + +# Booting multiple devices correctly with a single kernel + +The next issue we needed to solve was getting a single kernel that booted properly on both devices. Android's GKI effort has made this much easier as the core kernel is the same on all the devices, but we still need kernel driver modules in place to support both boards. So we added RB5 support in DB845c build and config fragment files in [the android-mainline tree](https://android-review.googlesource.com/c/kernel/common/+/1791854/). It made sure that build.config.db845c artifacts from android-mainline and android13-5.15 trees can boot on RB5 as well. + +The major blocker we ran into was with the bootloader. 
Specifically, Qualcomm's ABL (the edk2/UEFI secondary bootloader), which is responsible for loading the kernel and the platform-specific Device Tree. The primary purpose of a Device Tree (or Device Tree Blob) in Linux is to provide a way to describe non-discoverable hardware ([more on Device Tree here](https://elinux.org/Device_Tree_Reference)). And even though RB5 and DB845c share a lot of common blocks, we are still talking about two different SoCs with enough hardware differences that are not discoverable at run time. So we have to depend on the DTB to provide that platform specific information to ABL.
+
+Now, ideally, the DTB is supposed to be kept and provided by the bootloader, as it is supposed to be tied to the hardware. If that were the case, this would be even easier. However, in practice that is not particularly common, as often DTBs are in flux while drivers are upstreamed, and thus the DTBs end up being managed together with the kernel. With AOSP, the boot image can provide a single DTB as dtb.img or a list of concatenated DTBs as dtb.img. So the first step towards a single AOSP boot image was to concatenate and pass the DB845c and RB5 DTBs as the dtb.img, and let the ABL select and load the platform specific DTB from dtb.img. This DTB selection or matching is done based on DTB properties like qcom,{msm-id/board-id/pmic-id}, and since these properties were not supported on DB845c and RB5 initially, we put a hook in the ABL to pick the first and only DTB it could find in dtb.img.
+
+# The end result
+
+So for a concatenated dtb.img to work, we upstreamed these DTB properties for DB845c and RB5 in Linux v5.16-rc1 and also backported them to the android13-5.15 GKI common kernel branch. We also updated the ABL to [re-enable the multiple appended DTB support](https://git.linaro.org/landing-teams/working/qualcomm/abl.git/commit/?id=08d45c5), while maintaining backward compatibility to boot these devices with older kernel versions.
+
+As of today, AOSP’s db845c-userdebug lunch build target will boot on both DB845c and RB5 devboards. Follow the build instructions from [https://wiki.linaro.org/AOSP/db845c](https://wiki.linaro.org/AOSP/db845c) or download prebuilt binaries from [http://snapshots.linaro.org/96boards/dragonboard845c/linaro/aosp-master/](http://snapshots.linaro.org/96boards/dragonboard845c/linaro/aosp-master/) to boot AOSP on DB845c and RB5.
+
+# Simplified testing and development with a generic vendor image
+
+So while having generic vendor images to support multiple development boards has been a clear win for us, we also think this approach has the potential to simplify vendor update logistics as well.
+
+Currently vendors have custom vendor images for each device, usually built out of a per-device source tree. Even if, as is often the case, some of the IP-blocks are shared between devices, each device usually has its own fork of the IP-block HAL support. So as bugs or security issues are found and updates are needed, these per-device images must be created, validated and deployed each on their own timeline.
+
+![Generic vendor image](/linaro-website/images/blog/generic-vendor-image-1)
+
+And for those HALs derived from the same code base, this may mean repeatedly fixing the same issue separately for each device.
+
+If vendors utilized a generic image approach, there would be only one codebase that needs to be managed. Fixes to shared HALs would be made only once. Testing and development focus also becomes simpler as there’s only one series of images to track and maintain.
+ +![Generic vendor image 2](/linaro-website/images/blog/generic-vendor-image-2) + +Testing still has to be done across all devices for each image update, but this is a much simplified story compared with the complexity of keeping track of, and validating, various bug fixes with per-device image versions. + +Now this doesn’t come without costs. Obviously supporting multiple devices in the same vendor image requires more code and thus more space than just supporting a single device. And it’s always easier to just focus on a single device when trying to make short-term deadlines, instead of considering impacts of any changes to other devices. So the trade offs have to be weighed, but we would suggest vendors think about how they can use iterative development and standard upstream interfaces to consolidate the amount of per-device logic they manage, and consider the longer term cost savings they may find when trying to support and maintain the array of devices that they release each year. + +For more information on the work we do on Software Device Enablement for Android, check out our project page [here](https://linaro.atlassian.net/wiki/spaces/SDEFAU/overview). diff --git a/src/content/blogs/the-challenges-of-abstracting-virtio.mdx b/src/content/blogs/the-challenges-of-abstracting-virtio.mdx new file mode 100644 index 0000000..c423a3a --- /dev/null +++ b/src/content/blogs/the-challenges-of-abstracting-virtio.mdx @@ -0,0 +1,57 @@ +--- +title: "The Challenges of Abstracting Virtio " +description: In this blog we talk about the challenges of abstracting Virtio. + Read the blog to find out more. +date: 2022-11-15T04:46:14.000Z +image: linaro-website/images/blog/virtualization-image +tags: + - virtualization +author: alex-bennee +related: [] + +--- + +One of the hats I wear is as the lead for [Project Stratos](https://linaro.atlassian.net/wiki/spaces/STR/overview) which was our virtualisation pathfinder project where we explored approaches to implementing VirtIO devices. VirtIO offers a way to simplify hardware support by decoupling the main guest operating system from having to include drivers for every possible device it might need to interact with in its build. The nitty gritty details of talking to the actual hardware can be kept in a separate Virtual Machine (VM) which helps lower the complexity of distributing the main OS. + +We've had a [couple](https://linaro.atlassian.net/wiki/spaces/STR/pages/28765880340/2022-09-30+Project+Stratos+Meeting+notes) of [meetings](https://linaro.atlassian.net/wiki/spaces/STR/pages/28771778789/2022-10-14+Project+Stratos+Meeting+notes) in the project to try and identify some potential areas of work and for this blog post I'd like to discuss some thoughts about virtio-camera. + +## A quick primer of VirtIO + +I've talked about this in detail in a [previous blog post](https://www.linaro.org/blog/virtio-work/) but in short VirtIO provides a generic abstraction of a piece of hardware. It does this in a way which is friendly to hypervisors by providing a programming model that works hard to avoid expensive "exit" events. The model has been so successful that VirtIO based block and network devices are standard in the world of cloud computing. A VirtIO enabled Linux kernel shouldn't need to worry about the details of the underlying hypervisor to access virtual disks and the network because it can use the same frontend driver either way. 
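+
+As a quick, hypothetical illustration (assuming a Linux guest that has been given VirtIO block and network devices), you can peek at the virtio bus in sysfs and see the same generic frontend drivers bound regardless of which hypervisor sits underneath:
+
+```
+# List the VirtIO devices the guest has discovered on the virtio bus
+ls /sys/bus/virtio/devices
+
+# Show which generic frontend driver each device is bound to
+# (e.g. virtio_blk for disks, virtio_net for network interfaces)
+for dev in /sys/bus/virtio/devices/*; do
+  echo "$dev -> $(basename "$(readlink "$dev/driver")")"
+done
+```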
+ +VirtIO also avoids trying to cater to the lowest common denominator by having a well defined feature discovery and negotiation interface. This means a more functional hypervisor can expose additional features to the host which will only be taken advantage of if the guest is suitably up to date. + +## virtio-camera, cloud native and HALs + +One potential VirtIO device that has been on our radar since the inception of Project Stratos is that of the virtio-camera. It's been mentioned to us by a number of our member companies as something they are interested in while at the same time it's been hard to gather a consistent set of requirements and use cases. I think it is an interesting study in how different sectors of the IT industry can look to a technology to solve different problems. + +Let's first talk about the camera. My history with computer vision goes all the way back to the 80s and the height of the "silicon fen" personal computer boom. Perhaps unsurprisingly my parents were true technology geeks and we had one of the very first hobbyist digital vision systems. It was born from observation that if you removed the protective cover off a dynamic RAM chip and periodically reset the memory you could detect photons. Add a basic lens to focus the image and you could record a simple black and white image into your computer. If you took 3 images with red, blue and green filters you could even re-constitute that image into something approximating colour. + +How digital photography has changed over my lifetime! We now take it for granted that we carry around multi-megapixel sensors in our pocket capable of High Dynamic Range (HDR), slow motion (i.e. high frame rate) combined with advanced computational photography to bring the best out of our hasty snaps. As well as the cameras in our phones we are seeing more in the wider environment, from the ubiquitous Closed Circuit TV (CCTV) to cars where the simple reversing camera has multiplied into arrays of sensors tracking hazards and preparing for the oncoming automation of driving. This wide spectrum of use cases does +raise the question of can a VirtIO device satisfy a diverse range of requirements? + +### Cloud Native development + +One of the use cases of VirtIO is enabling something called Cloud Native development. This is a model of development being driven by organisations like SOAFEE who argue for a [portable software stack](https://www.arm.com/blogs/blueprint/cloud-native-automotive-development) which can be used in the cloud and in their final embedded edge processing location. Rather than re-build your application in a test harness to run through simulations in the cloud you target a virtual platform. From the applications point of view there is no difference between consuming data from your corpus of test data or from a real live sensor. As a result you can have greater confidence in your solution as it is transferred from its cloud environment to the actual edge hardware it will run in. + +There are a number of challenges that need to be addressed for this model of deployment. Most importantly perhaps is that when deployed in edge hardware the performance meets acceptable latency and bandwidth requirements. No one will want to build a safety critical system on something that occasionally delays an image or delivers images in a stuttered fashion. It will also be no good if an algorithm trained on glorious 4k images from the cloud first encounters shakier lower resolution images when first run in the actual car. 
+ +### HAL Abstractions + +Because the features of a phone are so tied to the features of the individual System on a Chip (SoC) that it is based on, there is a very direct relationship between the SoC and the kernel a system can ship. While initiatives like [Project Treble](https://arstechnica.com/gadgets/2017/05/google-hopes-to-fix-android-updates-no-really-with-project-treble/) have tried to reduce the delta between the vendor supplied kernel and a common baseline kernel there is still a lot of vendor code in the systems kernel. This makes upgrading phones difficult - especially when upgrading the rest of the mobile OS itself. Newer versions of Android often require newer versions of the kernel to support other user facing features. If the vendor doesn't update the kernel it locks the phone out of receiving newer versions of the OS. + +One approach to avoiding this sort of dependency is to isolate vendor code into so called driver domains. These are small virtual machines (VMs) with direct access to parts of the hardware. They would be provided by the chipset vendor and would drive the actual HW and present it as a VirtIO device to a generic kernel that hosts the main OS image. Assuming there are no bugs in the back end you are then free to upgrade your generic kernel accessing this Hardware Abstraction Layer (HAL) provided by the vendor code. +Abstraction is of course a mainstay of software engineering. Our entire computing experience is built on abstractions all the way down to the microcode running on CPUs which present an abstraction of the particular CPU architecture that the silicon implements. The challenge of course is avoiding an implementation which just caters to the lowest common denominator. + +As I mentioned earlier modern camera chipsets offer a whole range of features. Aside from the variety of sensor sizes, optics and performance characteristics there is a slew of smart processing that can be done before the image is delivered to the user. For example there may be a secondary depth sensor which can provide the data required to create a [bokeh](https://en.wikipedia.org/wiki/Bokeh) effect to make the foreground element "pop". For mobile applications a virtio-camera would be unlikely to fly if the latest and greatest features provided by fancy new hardware never get exposed to the user. + +VirtIO already provides a feature negotiation mechanism which allows for a device to evolve as time goes on. However, virtio-camera presents an exceptional challenge given the range of features already available and the potential for variation in the details of each implementation. Some devices may be able to alter their configuration for each individual frame captured and some may need a more classical configuration step before starting the capture process. Some features may only be available in a per-frame or a global context. + +### Upstream first? + +The VirtIO framework does make it easy to implement new devices because a lot of the decisions about how to represent things are made for you. When I did my last [survey of reserved IDs ](https://op-lists.linaro.org/archives/list/stratos-dev@op-lists.linaro.org/thread/VPLKMBWYB4PG2X5MTACUIW5SBGBP5HVF/#QD5W5PPV4XTG7TB7QJYQRYKI75SQS3YC)I found a lot of device implementations that lived in downstream forks of kernels. I've no doubt there are many more devices that exist only on some proprietary hypervisors to solve a particular niche issue. 
Generally these are code-only implementations without the accompanying changes to the [VirtIO specification](https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-64100019), and as a result there is no discussion on the list about the relative merits of the design choices made.
+
+One of the core approaches of Project Stratos was to leverage VirtIO as a standard interface to enable portability of guests between hypervisors. While open, up-streamed drivers are an important part of this, it is secondary to having a well thought out and openly developed specification. As a result, for every backend and driver we helped get upstream we also made sure the corresponding specification was ratified and voted on.
+
+It is important when developing this specification to get input from as many areas as possible. We don't want early decisions in what will be an evolving, iterative design to preclude implementing the advanced features that cameras will continue to gain during the coming years. So if you have an interest in bringing a standardised VirtIO interface to life, please come and talk to us and bring your experience and feedback. We have learnt a lot during our work on [Project Stratos](https://linaro.atlassian.net/wiki/spaces/STR/overview) and hope to continue collaborating in this space in the years to come.
diff --git a/src/content/blogs/the-end-of-an-era.mdx b/src/content/blogs/the-end-of-an-era.mdx
new file mode 100644
index 0000000..fe44de3
--- /dev/null
+++ b/src/content/blogs/the-end-of-an-era.mdx
@@ -0,0 +1,61 @@
+---
+title: The end of an Era
+description: In this article, Arnd Bergmann discusses the end of an era - the
+  linux-5.6 merge window concludes a project which has kept him busy for nearly
+  six years. Read more here!
+date: 2020-02-06T09:09:55.000Z
+image: linaro-website/images/blog/2038_image
+tags:
+  - linux-kernel
+  - arm
+  - open-source
+author: arnd-bergmann
+related: []
+
+---
+
+With the linux-5.6 merge window, a project ends that has kept me busy for nearly six years: preventing the “Epochalypse” by changing every single instance of a 32-bit time\_t in the kernel to a type that does not roll over on 2038-01-19.
+
+While both John Stultz and I had been thinking about and prototyping partial solutions even earlier, the year 2014 is when we started discussing more openly in Linaro and the wider kernel community what needed to happen. In a team effort, John started rewriting the core timekeeping support of the kernel, working his way out, while I would work my way down from the outside, starting with file systems and then system calls and device drivers, with the goal of getting this done by the end of the year.
+
+### Spreading the Load
+
+As chronicled on [lwn.net](https://lwn.net/Kernel/Index/#Year_2038_problem), it turned out to take a bit longer. In order to address over 1000 files referencing time\_t, timeval or timespec as of linux-3.15, we recruited help from a number of places.
+
+The Outreachy program was a great resource for getting a lot of simple changes in drivers done, while internship candidates learned about contributing to the mainline kernel. Tina Ruchandani was my first intern and contributed 25 patches for the y2038 work in 2014/2015. For the 2015/2016 round, Deepa Dinamani joined as the second Outreachy intern and ended up implementing some of the most important bits all the way until the end, with hundreds of patch submissions.
+ +Within Linaro’s Kernel Working Group, I assigned simple driver conversions to new assignees from member companies to get them started on contributing to the upstream kernel while getting the conversion done one driver at a time, before moving on to more review intensive work in the kernel. Baolin Wang worked on converting real-time clocks and the audio subsystem, Firoz Khan’s first contribution was to rewrite the system call tables across all CPU architectures and many others contributed to device drivers. + +### Yak Shaving + +Usually, getting y2038 fixes included was really easy, as maintainers are generally happy to take an obviously correct bugfix that they don’t have to implement themselves. However, some cases turned out to be much more time and labor intensive than we had imagined. + +Converting the VFS code to use 64-bit inode timestamps took countless rewrites of the same patches, first from me and then from Deepa who finally succeeded. We wanted to avoid having to do a “flag day” change, which is generally considered too invasive and risks introducing regressions, and we wanted to minimize the changes for existing 64-bit users and for existing 32-bit applications. Doing this step-by-step change however turned out to add a lot of complexity as well. In the end, Deepa worked out a process of many non-invasive changes over multiple merge windows, followed by [an automated conversion using coccinelle](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=95582b0083883). The same series also fixed unrelated issues in the way some file systems generated their timestamps which reviewers had complained about. + +This is an effect that can be observed a lot in kernel development: when you work on a simple bugfix, there is a good chance that development or review finds a much larger issue that also wants to be addressed, at which point it becomes near impossible to get the simple change merged without also addressing the wider problem. Issues that we addressed along the way include: + +* Changing the time functions away from getnstimeofday() to ktime\_get() and similar conversions addressed the bugs with leap seconds, with time going backwards from settimeofday() as well as some particularly inefficient code. +* File system timestamps are now checked for overflow in a consistent way, and interpreted the same way on 32-bit and 64-bit architectures, extending the range to at least year 2106 where possible. +* The system call tables are now generated from machine readable files, and all architectures support at least the set of standard system calls that are available to newly added architectures. +* Converting all the architectures led to the decision to [clean out architectures that are no longer actively used or maintained](https://lwn.net/Articles/748074/) +* David Howells contributed the statx() system call that solves passing 64-bit timestamps along with many other features that are not present in stat(). +* The handling for 32-bit compat tasks on 64-bit kernels is more consistent with the native system calls now, after a lot of the compat syscalls were rewritten to be shared with time32 support for 32-bit architectures. Most importantly, the compat ioctl() handling is now completely reworked and always handled by the driver rather than a centralized conversion function that easily gets out of sync. 
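+
+As a concrete (and deliberately simplified) illustration of what many of those driver conversions looked like, the fragment below swaps a y2038-unsafe timestamp for its 64-bit equivalent. It is a made-up example rather than code from any particular driver; the stored field and device structure are hypothetical.
+
+```
+/* Illustrative only -- a made-up driver timestamp, not real kernel code. */
+
+/* Before: struct timespec and getnstimeofday() use a time_t that
+ * overflows in 2038 on 32-bit architectures. */
+struct timespec ts;
+getnstimeofday(&ts);
+dev->last_event = ts.tv_sec;     /* 32-bit seconds on 32-bit systems */
+
+/* After: struct timespec64 and ktime_get_real_ts64() keep a 64-bit
+ * seconds value everywhere; where wall-clock time is not actually
+ * needed, the monotonic ktime_get*() variants are preferred instead. */
+struct timespec64 ts64;
+ktime_get_real_ts64(&ts64);
+dev->last_event = ts64.tv_sec;   /* always 64-bit */
+```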
+ +### Endgame + +With all the VFS and system call changes out of the way during early 2019, the kernel was basically working, but a number of smaller issues still remained. In the summer I set out to make a list of everything that was still missing and revisited patches I had done in the previous years. Instead of creating the list I ended up writing the remaining \~100 patches: alsa and v4l2 were still lacking ABI changes, the NFS implementation and a few other file systems still needed changes, and there were still users referencing the time\_t type. [The resulting branch](https://git.kernel.org/pub/scm/linux/kernel/git/arnd/playground.git/log/?h=y2038-endgame) was basically ready for linux-5.4, and with the usual bug fixes and testing this has now all but made it into the ongoing linux-5.6 merge window. The last patch in the series hides the traditional time\_t definition from kernel space and removes all the now unused helper functions that use it to prevent new references from getting merged. + +### Fixing User Space + +After the time64 system call ABI was finalized in linux-5.1, work on using this in the C libraries got a lot more serious. The release of musl-1.2 is now imminent and will provide time64 for all newly compiled code. Adelie Linux is already migrating to this version and has [a list of known issues](https://wiki.adelielinux.org/wiki/Project:Time64). I expect the bugs to also get fixed in upstream projects soon. The first preview release of a time64 Adelie Linux is [available for testing now](https://distfiles.adelielinux.org/adelie/1.0/iso/rc1/). Most other distributions based on musl are likely to do the same conversion over the next months, depending on their release cycles. + +For glibc, work is still ongoing, the plan at the moment is to move over to 64-bit time\_t as an option in glibc-2.32 later this year. However, the default is still a 32-bit time\_t, and as glibc based distributions tend to have a larger number of packages, there is a very significant effort in rebuilding everything in a coordinated way. Any library that exposes an interface based on time\_t must be recompiled along with all applications and other libraries using this interface, so in the end the result is typically a completely incompatible distribution. The Debian “armhf” port for ARMv7 CPUs is an obvious candidate that will have to go through this transition, but I expect most of the other distributions on 32-bit CPUs to stay with 32-bit time\_t and then stop support before this becomes a problem. + +So far it is looking good for the distro port, as most of the y2038 problems have already been found by the various BSD Unixes that changed over years ago (thanks guys!), so a lot of the remaining problems are either Linux specific, or in applications that have never been ported to anything other than Linux. I expect that once we get into larger scale testing, we will find several sets of problems: + +* Bugs that got introduced by an incorrect conversion to the time64 interfaces, breaking existing source code regardless of the time\_t definition, like the regressions that are inevitably caused by any larger change and hopefully found quickly. For instance, we broke the sparc architecture port multiple times, but then also found ancient sparc bugs from a previous large-scale change that are now fixed. +* Problems of an incorrect or incomplete conversion, breaking 32-bit software after the conversion to 64-bit time\_t, e.g. 
a format string printing a time\_t as a ‘long’ type rather than a ‘long long’, software that mixes the libc data types with direct calls to low-level kernel interfaces like futex(), or source packages that contain outdated copies of kernel headers such as linux/input.h or sound/asound.h.
+* 32-bit software that works correctly with 64-bit time\_t until 2038 but then still fails because of an incorrect truncation to a ‘long’ type when it defines its own types rather than using the ones from system headers.
+* Anything that uses a fixed 32-bit representation for time\_t values remains broken in both 32-bit and 64-bit applications. This often involves on-disk or over-the-wire data formats that are hard to change.
+
+The biggest challenge will be to find and update all the devices that are already being deployed without the necessary bug fixes. The general move to 64-bit hardware even in deeply embedded systems helps ensure that most machines only run into the last set of problems, but 32-bit hardware will be deployed for many years to come, and will increasingly run old software as fewer developers are motivated to work on it.
diff --git a/src/content/blogs/the-evolution-of-the-qemu-translator.mdx b/src/content/blogs/the-evolution-of-the-qemu-translator.mdx
new file mode 100644
index 0000000..250edf5
--- /dev/null
+++ b/src/content/blogs/the-evolution-of-the-qemu-translator.mdx
@@ -0,0 +1,131 @@
+---
+title: The Evolution Of The QEMU Translator
+description: >
+  In this article, Alex Bennee looks at the changes the QEMU translator has seen
+  over the last 5 years. Read about the evolution of the QEMU translator here!
+date: 2020-07-22T01:00:55.000Z
+image: linaro-website/images/blog/tech_background_2
+tags:
+  - qemu
+  - toolchain
+related_projects:
+  - QEMU
+author: alex-bennee
+related: []
+
+---
+
+# Introduction
+
+The QEMU team in Linaro sits inside a group known as the Toolchain Working Group (TCWG). The rest of the team spend their time working with compilers and other code generators such as [GCC](https://gcc.gnu.org/) and [LLVM](https://llvm.org/). When dealing with emulation, QEMU has its own module known as the Tiny Code Generator (TCG). It shares many similarities with a compiler, albeit one that works under different constraints than your typical compiler. As the code generator works on a just-in-time (JIT) basis it can't afford to spend the large amounts of time (or memory!) that a typical compiler does when optimising its output. This is especially true for code that only gets executed once or twice before being flushed out of the cache.
+
+## History
+
+The TCG is actually the second code generator that QEMU has used. Originally QEMU worked as a "template" translator where each individual instruction had a snippet of C code associated with it. The translation was a case of stitching these templates together into larger blocks of code. This meant porting QEMU to a new system was relatively easy because if GCC supported it, you could generate code to run under it. However, eventually the limits of this approach necessitated moving to a new code generator and TCG was born.
+
+TCG has its roots as a generic back end for a C compiler. The main difference is that instead of converting an abstract syntax tree from a high level language into micro ops, its input is the decomposed operations of an individual instruction.
+ +A simplified version might look something like this: + +``` + static void disas_add_imm(DisasContext *s, uint32_t insn) + { + /* Decode Instruction */ + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + uint64_t imm = extract32(insn, 10, 12); + /* Allocate Temporaries */ + TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); + TCGv_i64 tcg_rd = cpu_reg_sp(s, rd); + TCGv_i64 tcg_result = tcg_temp_new_i64(); + /* Do operation */ + tcg_gen_addi_i64(tcg_result, tcg_rn, imm); + tcg_gen_mov_i64(tcg_rd, tcg_result); + /* Clean-up */ + tcg_temp_free_i64(tcg_result); + } +``` + +The decode step involves dissecting the various fields of the instruction to work out what registers and immediate values are needed. The operation is synthesised from TCG ops which are the basic units of the code generator. After a simple optimisation pass, these ops are then converted into host instructions and executed. + +You can see the process yourself if you turn on the debugging options in QEMU although be warned it generates a lot of output: + +``` +qemu-aarch64 -d in_asm,op,op_opt,out_asm testprog +``` + +## Evolution + +While the TCG has been part of QEMU since 2008 it has seen some changes over time. I've been working in and around it since 2015 and I thought it would be an interesting exercise to look at some of the changes it has seen over the last five years. + +### Common Loop and Decode Tree + +Originally each guest architecture just supplied a 'gen\_intermediate\_code' function that dealt with the process of translating a block of guest code into TCG operations. While they all looked fairly similar they also tended to have accumulated their own slight idiosyncrasies. The work to convert to a common translator loop didn't involve any particular bleeding edge technology and was mostly concerned with re-factoring architecture specific parts behind a set of 'TranslatorOps' that would be familiar to anyone who has worked on something like a Linux device driver. The main reason I mention this work is because it opened the way for architecturally independent enhancements to be made functioning of the translator. This includes things like much improved tracing and [TCG plugin](https://qemu.readthedocs.io/en/latest/devel/tcg-plugins.html) instrumentation. + +Another recent innovation is the [Decode Tree](https://qemu.readthedocs.io/en/latest/devel/decodetree.html). This started as an experiment with another of QEMU's testing tools known as Random Instruction Sequence (generator for) Userspace [RISU](https://git.linaro.org/people/peter.maydell/risu.git/about/) which is used to test the instruction decoder. + +Ideally an instruction set fits into a nice regular and tree like decode pattern. However, reality often gets in the way, especially when ISA designers are trying to squeeze additional functionality into an increasingly crowded opcode space. Eventually you end up with functions like [this](https://git.qemu.org/?p=qemu.git;a=blob;f=target/arm/translate-a64.c;h=73d753f11fbe7878e23cbfaa9df38be4d8b96cbd;hb=HEAD#l14381) which do a series of masked pattern tests in a very particular order to tease out exactly which instruction is being decoded. Needless to say this process is error prone and many bugs have occurred due to mistakes in decoding the opcode. + +Decode Tree solves this problem by allowing a simple textual description of the opcode fields and then having a script automatically generate the most efficient decoding of opcode it can. 
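+
+To give an idea of what that textual description looks like, here is a schematic fragment for an add-immediate pattern. It is illustrative only; the field names and bit layout are simplified rather than copied from QEMU's real A64 decode files, but the shape (field definitions, an argument set, a format and a pattern) is what the decodetree script consumes, and a pattern named add_imm would be wired up to a trans_add_imm() function like the one shown below.
+
+```
+# Schematic decodetree description, not taken from QEMU's A64 decode files.
+%rd         0:5
+%rn         5:5
+%imm12     10:12
+
+&rri        rd rn imm
+
+@rri        .......... ............ ..... .....   &rri imm=%imm12 rn=%rn rd=%rd
+
+add_imm     1001000100 ............ ..... .....   @rri
+```
+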
As a bonus it can also automatically extract the fields from the instruction and pass those to a simplified implementation that can just concentrate on the semantics of the operation. + +``` +static void trans_add_imm(DisasContext *s, arg_rri *a) + { + TCGv_i64 tcg_rn = cpu_reg_sp(s, a->rn); + TCGv_i64 tcg_rd = cpu_reg_sp(s, a->rd); + TCGv_i64 tcg_result = tcg_temp_new_i64(); + /* Do operation */ + tcg_gen_addi_i64(tcg_result, tcg_rn, a->imm); + tcg_gen_mov_i64(tcg_rd, tcg_result); + /* Clean-up */ + tcg_temp_free_i64(tcg_result); + } +``` + +Decode Tree was originally written to support the introduction of [SVE](https://www.linaro.org/blog/sve-in-qemu-linux-user/) in QEMU but since then new guests have used it and several existing guest architectures have been converted to use decode tree powered instruction decode. + +### Multi-threaded TCG (MTTCG) + +The original implementation of system emulation was single-threaded and although user-mode emulation followed the threading model of the programs it translated, this was distinctly flaky in its behaviour. The process of converting QEMU to a fully multi-threaded app had started with the introduction of KVM support but for a long time it was always assumed that TCG had too much global state to make multi-threading viable. + +In the end it was a multi-year effort involving contributions from many different sections of the community. You can read about some of the details in a [LWN article I wrote as we approached merge](https://lwn.net/Articles/697265/). There where changes behind the scenes like a lock-less hash table called QEMU Hash Table ([QHT](https://git.qemu.org/?p=qemu.git;a=blob;f=util/qht.c;h=67e5d5b9163f5f33e41f76a7cd261b9f620096f3;hb=HEAD)) which is optimised for the read case as well as front end changes like properly modelling atomic and memory barrier operations. + +Now MTTCG is the default for the majority of the mainline architectures and any new architecture tends to support MTTCG from the start. + +### TCGv\_vec + +When we started working on implementing ARM's [Scalable Vector Extensions for QEMU](https://wiki.qemu.org/Features/ARM/SVE) we realised we were taxing TCG's scalar orientated API. Up until that point most Single Instruction Multiple Data (SIMD) instructions where implemented by manually unrolling into a series of scalar operations. While this worked it was somewhat inefficient, especially if the actual implementation would end up in helper calls anyway (as most floating point operations do). Previous proposals for introducing SIMD TCG ops had been rejected because of the large range of vector sizes that would lead to an explosion of TCG ops - one for each vector size. + +In the end SVE's vector size agnostic approach would be an inspiration for a new API which can encode a vector op on an arbitrarily sized vector. The interface is rich enough that the backend still has the option of using the hosts own vector instructions to generate code while also providing helper based fallbacks for the cases where we can't. There is still a place for target specific helpers but now they can use the TCGv\_vec interface to pass pointers to the register file in a consistent way. While originally written to support SVE work, other targets have started using the interface for their vector implementations. + +### Inline dynamic jumps (tb lookup) + +The translator works by translating a block of instructions at a time. At the end of the block it can jump to one of two blocks. 
When these are static addresses, that jump will get patched in, once the next block is translated. If the translator doesn't know what to execute next it exits from the translated code back to the outer loop which will either translate a new block or process some sort of asynchronous operation. However, there is one case where we shouldn't need to make such an expensive exit which is that of the computed jump. The translator can't know at translation time where a jump may go, but it can certainly do the lookup inline and avoid the expensive exit. + +## Potential Future Directions + +There is still plenty of scope to improve things, so some of things that are being considered for future improvement include: + +### Pre-caching for linux-user + +While the JIT is fast enough that you don’t notice it even in interactive use it is still quite inefficient in a lot of use cases. A common use case for linux-user mode is using a guest compiler as a fake cross-compiler - effectively running a native compiler on the emulated target hardware. For a typical compilation there is a lot of code that we end up re-generating for every invocation which is a bit of a waste. We could on completion of an execution save our translation cache for the benefit of future runs. + +### More efficient chaining for SoftMMU + +When running system emulation we disable chaining of generated blocks between pages. This is because at any point the system may swap out a page for different contents at which point we would need to find all blocks that jump into a page and invalidate them. However page granularity is overkill for a lot of the code. For example the kernel typically resides in a fixed series of physical pages and never swaps itself out. + +### Hot Block analysis + +Currently the JIT doesn’t take into account any hot sequences of multiple blocks. For example most JavaScript engines will detect when a particular sequence of blocks is in a tight loop and then combine the hot-path into a single heavily optimised sequence. By taking the larger block into account you have more opportunities for traditional optimizations like dead-code elimination and register propagation. + +### Value propagation + +The current optimisation pass is relatively simple as most blocks are quite small and you always need to ensure that values computed in host registers are stored correctly back in the memory that represents the guest registers before the end of the block. However currently we still end up re-loading values more than we need to. Two examples are [constants](https://patchew.org/QEMU/20200508182616.18318-1-richard.henderson@linaro.org/) which are used for multiple operations and store-load propagation where a value is stored in a register and then immediately used for a following operation and is still present in a host register. + +### SSA Form + +Single Static Assignment (SSA) form is a fairly standard way that compilers use to represent the data flow of a particular set of operations. It is favoured by compilers because it makes analysis easier and optimisations become a matter of transforming a tree of operations. QEMU currently uses a simpler virtual register approach which favours faster code generation. There is a trade-off to be made between fast and optimal code generation that we tend not to worry about with compilers (compare for example a -O0 and -O3 compile). It might be a step too far or it might be the gateway to even faster code. 
We shall have to experiment ;-) + +## Conclusion + +It is fair to assume a lot of the work done in the team is about improving QEMU's ARM specific emulation - see for example the recent [changelog](https://wiki.qemu.org/ChangeLog/5.0#Arm) and [ARMv8.5-MemTag](https://wiki.qemu.org/ChangeLog/5.1#Arm) in the upcoming 5.1 release. However, we also benefit from the QEMU being a healthy project that supports a wide range of host and guest architectures. Our goal is still to make QEMU the go to emulation platform for free software developers to experiment with the latest ARM ISA features - as well as the best free software emulation platform for any architecture. I hope this article has given you a flavour of the sort of changes that have been made to the core translator over the last few years. There is certainly more to come as we continue to work on improving QEMU every day. diff --git a/src/content/blogs/the-kisscache-caching-server.mdx b/src/content/blogs/the-kisscache-caching-server.mdx new file mode 100644 index 0000000..427b42d --- /dev/null +++ b/src/content/blogs/the-kisscache-caching-server.mdx @@ -0,0 +1,100 @@ +--- +title: The KissCache Caching Server +description: > + In this blog, we take a look at the open-sourced KissCache, a simple server + built on the KISS principle: Keep It Simple Stupid. Read more here. +date: 2020-05-18T04:56:56.000Z +image: linaro-website/images/blog/technology-3389917_1920 +tags: + - datacenter +author: remi-duraffort +related: [] + +--- + +Linaro has recently developed and open-sourced [KissCache](https://gitlab.com/linaro/kisscache/), a simple and stupid caching server built on the KISS principle: Keep It Simple Stupid. + +Unlike classical proxies like [Squid](http://www.squid-cache.org/) that transparently intercept traffic, in order to use KissCache one must explicitly prefix the requested URL by the URL of the local KissCache instance. KissCache will download the requested resource in the background while streaming it to the client. + +If many clients are requesting the same resource, KissCache will download it only once and stream the content to every client. + +### **Use case** + +At [](/)[Linaro](/) we use KissCache in our CI system to cache build artefacts (kernel, rootfs, ramdisk, dtb). + +For instance, when [LKFT](https://lkft.linaro.org/) is validating a Linux kernel LTS release-candidate, it will submit many jobs to [LAVA](https://lavasoftware.org/) to be executed on a variety of hardware platforms. These jobs will run in parallel, using many of the same artefacts. Thanks to KissCache, our CI system will download each resource only once, saving network bandwidth. + +![kisscache2.png](/linaro-website/images/blog/kisscache2) + +In the last month, Linaro’s KissCache deployment handled more than 160k requests, serving 32TB of data while only downloading 1TB from outside of the Linaro lab. When artefacts are hosted on a system where network bandwidth is charged per unit (such as Amazon S3), this can amount to several thousands of dollars in savings per month (as is the case in Linaro.) + +### **Alternatives** + +Linaro has long used Squid in the Linaro embedded device Lab, but it has struggled to meet our requirements to: + +* download each resource only once when requesting the same URL in parallel +* cache https resources + +Configuring any proxy to handle https resources is fairly difficult and requires working around the security features of SSL certificates. 
When a client requests https://example.com while using a proxy, the proxy would need to provide a valid SSL certificate for 'example.com'. This breaks the usual assumptions about SSL certificates, as only 'example.com' should be able to generate such certificates.
+
+In order to generate a valid certificate for 'example.com', a site admin could:
+
+* generate a root certificate
+* install it on each client
+* configure the proxy to sign every requested domain with this root certificate
+
+The client would accept this fake certificate since it is signed by a known root certificate.
+
+While this is a working solution, if the root certificate is stolen, an attacker would be able to set up a man-in-the-middle attack on every local SSL connection.
+
+KissCache does not need to implement such an SSL hack since the client is directly connected to the KissCache instance, which can return its own SSL certificate.
+
+### **KissCache Usage**
+
+To quickly create a local instance of KissCache do the following:
+
+```
+git clone https://gitlab.com/linaro/kisscache
+cd kisscache
+docker-compose build
+docker-compose up
+```
+
+The instance will be available at http://localhost:8001.
+
+You can now use this KissCache instance by prefixing the URL:
+
+```
+curl "http://localhost:8001/api/v1/fetch/?url=https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.40.tar.xz"
+```
+
+KissCache workers will download the resource and stream it back to you.
+
+## **Configuration**
+
+### **TTL**
+
+By default, KissCache will keep each URL for 10 days. The admin may update the default value, while users can specify the duration in the request URL.
+
+```
+curl "http://localhost:8001/api/v1/fetch/?ttl=1d&url=https://lkft.linaro.org"
+```
+
+Every hour, KissCache will automatically delete resources that are outdated.
+
+### **Quota**
+
+By default, KissCache will use 2 GB of disk space. When the quota is full, KissCache will return a 507 (Insufficient Storage) error for every new request.
+
+If the quota usage is above 75%, KissCache will drop enough resources to lower the quota usage below 75%. KissCache will drop the least recently used resources first.
+
+### **Access Restriction**
+
+KissCache access can be restricted to a given network. Only IPs from a specific subnet will be able to fetch resources, while the web interface remains visible to anyone.
+
+### **Contact**
+
+KissCache is licenced under the MIT licence.
+
+The source code is available on [GitLab](https://gitlab.com/linaro/kisscache). Feel free to create an issue or to send a merge request.
diff --git a/src/content/blogs/the-shift-towards-hpc-ai-and-why-it-is-needed.mdx b/src/content/blogs/the-shift-towards-hpc-ai-and-why-it-is-needed.mdx
new file mode 100644
index 0000000..2311f09
--- /dev/null
+++ b/src/content/blogs/the-shift-towards-hpc-ai-and-why-it-is-needed.mdx
@@ -0,0 +1,51 @@
+---
+title: The shift towards HPC AI and Why it's Needed
+description: Linaro’s HPC AI Project develops technologies which leverage AI in
+  infrastructure management tasks such as orchestration and autoscaling. Read
+  more here.
+date: 2021-03-18T03:54:31.000Z
+image: linaro-website/images/blog/Dots_lines_datacenter_UNDER_2MB
+tags:
+  - hpc
+  - ai-ml
+  - datacenter
+  - arm
+  - open-source
+related_projects:
+  - HPCAI
+author: paul-isaacs
+related: []
+
+---
+
+## The need for scaling and intelligent decision making
+
+The shift from on-premise data centers to Infrastructure-as-a-Service (IaaS) has been happening for some time.
Whilst storage density has improved significantly (a terabyte now fits on a device the size of a postage stamp), data centers are becoming too big to fit conventional offices. The amount of data being processed requires huge amounts of computational hardware to convert into meaningful time-sensitive information. Engineering resources to maintain this hardware becomes an operational overhead that detracts from the core business. + +There is a need to scale to handle the Petabytes of data, deliver the results across national boundaries and to prioritize shifting workloads. As a result, we see data centers transitioning and consolidating. Across the global market this transition is confirmed by the server-class computing shipments to end-user companies being in decline year on year. + +How do we address these computational needs? + +## Introducing Linaro’s HPC AI Project + +Linaro’s HPC AI Project aims to develop technologies which leverage Artificial Intelligence (AI) in infrastructure management tasks such as orchestration and autoscaling. Managing workloads isn’t just about bandwidth. It is also about knowing when to prioritise what task. This is achieved by supporting workloads through intelligent enablement of infrastructure. + +HPC AI aims to balance data centre workloads across available racks/server chassis in an efficient manner, whether that be to complete the task in the shortest time, maximise the number of parallel jobs or minimise the number of active chassis. The permutations of possibilities are seemingly endless which is why Linaro will incorporate not just Machine Learning models and frameworks into the operation but also aim towards the higher-level artificial intelligence. The benefits of which play into the needs of Hyperscalers as well as the more conventional Cloud Computing. + +## What happens next? + +Over the next year, Linaro plans to heavily leverage the new FX700 Supercomputer to further enable Arm in the Open Source ecosystem. Conventional supercomputer use would sequentially run massive parallel jobs in order. Linaro is creating the flexibility to utilise the FX700 in a variety of configurations, with each being dynamically selectable and controlled via OpenStack Ironic. Options include running an 8-node ( 8 x 48 core and 8 x 32GB memory in a diskless/netboot configuration ) compute cluster, individual compute nodes, or as compute hosts for small virtual machines (multiples of 4GB Ram/6vCPU). + +The FX700 (A64FX with SVE) is not used in isolation. It joins Linaro’s existing multi-chassis installation of ThunderX2 and Kunpeng 910 based devices. Various permutations of all available hardware are selectable according to workload/requirements. This allows the most flexibility for testing, performance evaluation and developing directly on a range of Arm v8.x solutions. + +There is a difference between conventional supercomputer utilisation for end-user simulations and the requirements of Linaro engineers. Linaro’s aim is to ensure that the simulation engines are able to build/run and optimise for A64FX cores in either low or high core count opportunities and to explore the possibility of hosting novel approaches to Artificial Intelligence. + +These efforts will be leveraged by the entire HPC community moving forward as Linaro pushes findings into the upstream as improvements are integrated. This lays the foundations for a very flexible platform for running varying simulations. + +## Want to learn more? 
+
+In my talk at Linaro Virtual Connect 2021, I discussed the journey of data centre transitions and readiness to support AI on a Smart Scalable HPC, including our latest "8-node" supercomputer, from computational resource selection to the cloud-native software that brings it all together. To watch the session, click on the link below:
+
+[**LVC21-103: A Journey Towards a Smart Scalable HPC for AI**](https://resources.linaro.org/en/resource/Ra7pGC3mjyKGbuDGx5dTSi)
+
+For more information on the work we do in Cloud Computing and Servers, [click here](https://www.linaro.org/cloud-computing-and-servers/). Alternatively, [contact us here](https://www.linaro.org/contact/).
diff --git a/src/content/blogs/thermal-notifications-with-netlink.mdx b/src/content/blogs/thermal-notifications-with-netlink.mdx
new file mode 100644
index 0000000..6b64919
--- /dev/null
+++ b/src/content/blogs/thermal-notifications-with-netlink.mdx
@@ -0,0 +1,114 @@
+---
+title: Thermal Notifications With Netlink
+description: Daniel Lezcano introduces the thermal framework design and where
+  notification takes place, allowing userspace to be aware of the overall thermal
+  profile of the system.
+date: 2020-07-30T12:19:31.000Z
+image: linaro-website/images/blog/electricity-1288717_1920-1-
+tags: []
+related_projects:
+  - PERF
+author: daniel-lezcano
+related: []
+
+---
+
+# Introduction
+
+The goal of the thermal framework is to monitor the temperature of some system components and take immediate action if they are too hot. But how can userspace know about the events occurring in the kernel, or what actions are being taken?
+
+Recently introduced, and expected to evolve over time, netlink notification is the answer.
+
+This blog introduces the thermal framework design and shows where the notification takes place to allow userspace to be aware of the overall thermal profile of the system.
+
+## The thermal framework - a nice design
+
+The framework provides a level of abstraction where all the actors are clearly identified:
+
+* The thermal zone is the abstraction behind which the hardware sensor implementation provides the backend driver that returns the temperature via unified callbacks.
+* The cooling device is the abstraction of the device in charge of reducing the temperature. It could be a passive cooling device, which cools down by reducing the performance of the monitored device (for example changing the operating point of a CPU), or an active cooling device like a fan. The former does not need extra energy to cool down, while the latter does.
+* The thermal governor is the logic which acts on the cooling device to mitigate the temperature.
+
+The way a thermal zone is monitored will depend on the sensor capabilities:
+
+* Some sensors can only give the temperature when requested; in this case the thermal zone temperature will be monitored by a periodic timer. That means an idle system will be woken up to check the temperature even if there is nothing to do.
+* Some more modern sensors can be programmed to send an interrupt when a specific threshold is reached. In this case, the system can stay fully idle, as no wake-up is necessary. Please note that the polling mode also introduces a latency in the temperature threshold detection; statistically speaking it is half of the timer period. For instance, for a one second polling time, the average latency for detection will be 500ms, a duration that is far too large for modern boards which can experience thermal variance at a rate of up to 0.5°C / ms.
In this case, the interrupt mode is the guarantee of a synchronous action via the interrupt handling when a temperature threshold is reached. + +The following figure illustrates the different components of the thermal framework and how they interact with each other on a big.LITTLE system. + +![Components Of The Thermal Framework](/linaro-website/images/blog/components-thermal-framework) + +## Tracking the temperature and mitigating + +The SoC vendors must define what the safe temperature ranges are for a component, when to begin the mitigation process and finally when to give up by shutting down the system if the mitigation fails. This is managed by the use of temperature thresholds called “trip points”. In order to take the corresponding action (mitigation or shut down), they are classified by type as PASSIVE, ACTIVE, HOT and CRITICAL. Note the HOT trip is a userspace notification as a last resort to do an action to recover like killing a process or hot plugging CPUs. + +Let’s summarize the dynamic of the thermal framework on modern hardware with an example: + +1. A compute intensive application runs and causes a constant temperature increase. +2. The temperature reaches the PASSIVE trip point and an interrupt is fired to the thermal zone. +3. The thermal zone reads the temperature and finds out what is the corresponding trip point. If it is an ACTIVE or PASSIVE trip point, then the governor logic is invoked to set a state to the cooling device associated with the thermal zone (the higher the state, the greater the cooling effect). If it is a CRITICAL trip point, the system is shut down. +4. If the trend of the temperature is dropping, then the governor will decrease the cooling effect, if it is raising, then the cooling effect is increased. +5. The application ends, no more hardware intensive usage. +6. The temperature drops back down below the PASSIVE trip point and the governor stops the mitigation. + +The effect on mobile devices can be observed with gaming where we can feel how hot the device is and see the game showing unexpected latencies. + +During this mitigation process, the userspace had no clue at all on what was going on and what the thermal situation was. Usually being thermal agnostic is acceptable for desktops or servers as the cooling devices are active and the power consumption is not a problem, tolerable for laptops with a small fan, but unacceptable for fanless battery-powered mobile devices. The userspace, especially in Android systems, have daemons which monitor applications and temperatures to set the correct profile on the system for a better user experience. + +But how can the userspace get information about the current overall thermal profile? + +Actually, it can’t, unless the userspace registers itself as a governor, takes over the kernel logic and handles the mitigation itself. So the userspace can be informed by the thermal framework only if it is the governor. This is a limitation that forces the SoC vendors to implement their own custom solution leading to code fragmentation in the Linux kernel. + +## Thermal notifications + +In response to the lack of thermal communication between the kernel and the userspace, a solution based on the netlink has been implemented for Linux v5.9. + +[Netlink](https://en.wikipedia.org/wiki/Netlink) is a socket-based protocol used for communication between the Linux kernel and userspace. It additionally supports kernel to kernel communication as well as processes to processes. 
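+
+For readers who want to experiment, a userspace listener is only a few libnl calls: resolve the multicast group, join it and wait for messages. The sketch below is illustrative rather than a complete client (error handling and attribute parsing are omitted); the "thermal" family and the "event"/"sampling" group names are the ones used by the kernel patches at the time of writing, and the three channels they correspond to are described further down, so check include/uapi/linux/thermal.h for your kernel version.
+
+```
+/* Minimal sketch of a thermal netlink listener using libnl-genl. */
+#include <netlink/netlink.h>
+#include <netlink/genl/genl.h>
+#include <netlink/genl/ctrl.h>
+
+static int handle_msg(struct nl_msg *msg, void *arg)
+{
+    /* genlmsg_hdr(nlmsg_hdr(msg))->cmd identifies the notification;
+     * the attributes carry thermal zone id, trip id, temperature... */
+    return NL_OK;
+}
+
+int main(void)
+{
+    struct nl_sock *sk = nl_socket_alloc();
+
+    genl_connect(sk);
+    nl_socket_disable_seq_check(sk);              /* multicast events */
+    nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, handle_msg, NULL);
+
+    int grp = genl_ctrl_resolve_grp(sk, "thermal", "event");
+    nl_socket_add_membership(sk, grp);
+
+    while (1)
+        nl_recvmsgs_default(sk);                  /* block for events */
+}
+```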
+ +With netlink being a socket-based protocol, we get the benefit of the socket framework, so using the well documented socket option allows the userspace to set up the connection to the kernel as needed. + +Another interesting aspect of netlink is its ability to create a communication bus with a multicast channel the processes can subscribe to. Consequently, the notifications can be delivered to multiple processes at the same time. + +Netlink includes a protocol version, so the processes can deal with the supported version and fallback to a previous version if the kernel is older than expected. Thus the thermal notifications can evolve without breaking compatibility by incrementing the protocol version. + +The thermal netlink notification solution has three channels: + +* **Temperature sampling:** every thermal zone update sends a temperature message. If the thermal zone is in interrupt mode and the temperature is below the threshold, then no sampling will be sent until the mitigation happens. In case of polling mode, the temperature sampling will be sent at each update to all processes that are subscribed to the sampling channel. +* **Events:** A thermal zone creation, destruction, a trip point crossed, etc… will emit an event to all processes that are subscribed to the event channel. The list of the events will be defined at the end of this blog. +* **Commands:** The userspace can send discovery commands to get the list of the thermal zones, the trip points and the cooling devices. + +By splitting the channels, the traffic is reduced by preventing the userspace processes to filter out the sampling or the events that aren’t of interest. + +## Nomenclature + +### Sampling + +![Sample Table](/linaro-website/images/blog/sampling) + +### Events + +![Events Table](/linaro-website/images/blog/events) + +### Commands + +![Commands Table](/linaro-website/images/blog/commands) + +The userspace implementation will be merged into the generic netlink library when the protocol is considered stable. Meanwhile sampling code \[07-22-2020] is available [here](https://git.linaro.org/people/daniel.lezcano/thermal-genl.git/). + +**[About the author](https://www.linkedin.com/in/daniel-lezcano-8481435a/)** + +Daniel worked in 1998 in the Space Industry and Air traffic management for distributed system projects in life safety constraints. He acquired for this project a system programming expertise. + +He joined IBM in 2004 and since this date he does kernel hacking and pushed upstream the resource virtualization with the namespaces. He was the author and maintainer of the Linux Container (LXC). + +In 2012, he joined Linaro to work in the power management team. Deeply involved in the power management improvements for the different members of Linaro, he continues to contribute and maintain some parts of the Linux kernel in the power management area. + +Currently, he is maintaining CPUidle for the ARM architecture, the timer drivers and the thermal framework. + +## About Linaro + +Linaro is a Member-based company focused on the de-fragmentation of the Arm software Open Source ecosystem. Linaro also supports the Arm ecosystem through customized services, training, and support. 
We would love to hear from you and see how we can help you with any Arm-based support, so please feel free to reach out to set up a sync at [linaro.org/contact](https://www.linaro.org/contact/) + +## [About the Kernel Working Group](/core-technologies/toolchain/) + +The Kernel Working Group’s (KWG) primary focus is to be an active contributor to the upstream community and facilitate acceptance of our code into the Linux mainline kernel. Our goal is kernel consolidation - a single source tree with integrated support for multiple Arm SoCs and Arm-based platforms. diff --git a/src/content/blogs/tuxpub-the-serverless-file-server.mdx b/src/content/blogs/tuxpub-the-serverless-file-server.mdx new file mode 100644 index 0000000..9de874c --- /dev/null +++ b/src/content/blogs/tuxpub-the-serverless-file-server.mdx @@ -0,0 +1,67 @@ +--- +title: tuxpub - The Serverless File Server +date: 2020-06-02T11:10:28.000Z +image: linaro-website/images/blog/code_highway_small +tags: + - datacenter +author: ben-copeland +related: [] + +--- + +![Amazon S3 bucket and using Amazon CloudFront](/linaro-website/images/blog/amazon_diagram) + +### **What problems has Linaro solved with tuxpub?** + +At Linaro, we have previously hosted artifacts from Amazon S3 using a custom tool known as Linaro Licence Protection (LLP). LLP started life serving files from local disk storage, then later moved to Amazon S3. Technically LLP provides an S3 browsing interface, however it was never designed to run under a serverless architecture. This coupled with other necessary Linaro/License features (such as authentication) means that LLP doesn’t fit a “simple serverless” model. + +Linaro is presently working on a SaaS offering called [TuxBuild](https://gitlab.com/Linaro/tuxbuild) (and companion service called TuxBoot). These technologies are implemented using the new serverless model and have a need to provide artifacts from cloud storage using a lightweight application that provides a file browser as a web-based user front end. + +The original implementation used Javascript, but we quickly realised it wasn’t scalable, it wasn’t conformant with what web tools expected, and it lacked features which our users were demanding (such as the ability to pull the file contents in JSON). After searching for existing solutions we discovered that there were no available light-weight tools to solve our problems! + +We built a wishlist of the following features and requirements that we felt a proper file server would honour and set about building tuxpub: + +* Serverless methodology for easy deployment and management +* Ability to block the index page so people cannot browse other folders +* Allow users to access a JSON output of the page for easy downloading + +The following is a sample file browser front-end being served by tuxpub for the TuxBuild project: + +![sample file](/linaro-website/images/blog/tuxpub_lrg) + +### **How easy is it to deploy and manage?** + +Linaro can deploy our tuxpub instances with two lines of code and a config file! This procedure is documented in the TuxPub [readme](https://gitlab.com/Linaro/tuxpub#run-with-zappa). 
To bring up a TuxPub instance, a developer only needs to create an application shim with the following zappa configuration:
+
+```
+"dev": {
+    "app_function": "zappa_init.app",
+    "aws_region": "us-east-1",
+    "project_name": "lkft-tuxpub",
+    "runtime": "python3.7",
+    "s3_bucket": "zappa-tuxpub",
+    "environment_variables": {
+      "S3_BUCKET": "storage.dev.lkft.org",
+      "S3_REGION": "us-east-1",
+      "ROOT_INDEX_LISTING": "True"
+    }
+  }
+```
+
+With these files in place, a developer builds up a [pipenv](https://realpython.com/pipenv-guide/)\* environment with `pipenv install --deploy`, and then deploys it into Lambda with `zappa deploy dev`.
+
+One can even run the application locally with `S3_BUCKET=storage.dev.lkft.org S3_REGION=us-east-1 ROOT_INDEX_LISTING=True FLASK_APP=tuxpub flask run`.
+
+### **What are the limitations?**
+
+Since tuxpub uses the AWS API there are limitations set by the cloud provider. An index page with more than 1000 objects hits an API limit and generates a nasty error page. Because of this, we intend to implement ‘paging’ support. Tuxpub does not presently support user authentication and there are no immediate plans to add it.
+
+### **Can others use and contribute to tuxpub?**
+
+Linaro has made tuxpub available as open source software under the [MIT license](https://gitlab.com/Linaro/tuxpub/-/blob/master/LICENSE). This means that it’s free to deploy and modify. We’re very welcoming of pull requests! You can find the code [here](https://gitlab.com/Linaro/tuxpub).
+
+### **What is the future of tuxpub?**
+
+Linaro’s objective is to keep this application simple! We are being selective and do not want to add too many features that would bloat the application. Desirable feature additions (most notably paging support) are being collected in [tuxpub gitlab issues](https://gitlab.com/Linaro/tuxpub/-/issues) and addressed over time.
+
+N.B. \* "Pipenv is a packaging tool for Python that solves some common problems associated with the typical workflow using pip, virtualenv and the good old requirements.txt."
diff --git a/src/content/blogs/update-on-hikey-hikey960-efforts-in-aosp.mdx b/src/content/blogs/update-on-hikey-hikey960-efforts-in-aosp.mdx
new file mode 100644
index 0000000..b9562d4
--- /dev/null
+++ b/src/content/blogs/update-on-hikey-hikey960-efforts-in-aosp.mdx
@@ -0,0 +1,32 @@
+---
+title: Update on HiKey/HiKey960 efforts in AOSP
+description: In this article, John Stultz takes a look at the HiKey/HiKey960
+  efforts in AOSP as HiKey gets put into retirement. Read more here!
+date: 2020-04-20T10:21:20.000Z
+image: linaro-website/images/blog/hikey-image1
+tags:
+  - android
+author: john-stultz
+related: []
+
+---
+
+Back in 2016, after an amazing six-month effort of collaboration between various groups in Linaro, HiSilicon, and Google, the HiKey board was the first 64-bit ARM dev board that was officially supported in AOSP/master using a v3.18 kernel.
+
+![class=medium-inline right hikey-image1](/linaro-website/images/blog/hikey-image1)
+
+After that announcement, the collaborative efforts continued - moving to new kernels (v4.1, v4.4, v4.9, v4.14, and v4.19), adapting to changes from new AOSP releases, and upstreaming kernel support into the mainline tree.
Over the last 4 years, the HiKey board became a very useful tool for developing and testing the latest AOSP code with the latest upstream kernel work - be it adding the generic Linux bluetooth HAL, the move from fbdev to drm\_hwcomposer, the EAS scheduler, upstreaming ION functionality via the dma-buf heaps, the Treble effort for Android Generic System Image (GSI), or even the initial proof of concept work to support the Android Generic Kernel Image (GKI). Not to mention all the upstream kernel regressions that were caught and fixed before an LTS release and the resulting android common kernel was even created.
+
+But the sun has begun to set for the HiKey board. It has not been available via retail for quite some time, and the lack of GLES3 support and the 1GB (later expanded to 2GB) RAM size have become quite limiting for the AOSP environment. So in the last few months, documentation on using HiKey has been removed from the public Android websites, and the needed Mali driver support wasn’t added to the android-5.4 kernel, making android-4.19 the last stop for HiKey in AOSP.
+
+That said, while development focus has moved on to other boards, it doesn’t mean HiKey gets to shuffle off into the back of the drawer for retirement. In the Linaro labs, many HiKey boards are still actively working night after night running tests. Since the support for the board (except the binary Mali driver) was upstreamed, it is still a very valuable board for validating upcoming changes to the Linux kernel stable LTS releases (from v4.4 to v4.19), which ensures vendors don’t see regressions when they adopt security updates to their already shipped production devices. And as the support code is fairly generic and has not been removed from AOSP, some of us who have become fondly attached to the board still find some time to do regular testing with the latest AOSP/master branch combined with the latest mainline kernels, continuing to utilize the board for that overlap of separate communities that it was first to help bridge.
+
+But we’re not here for a memorial! The HiKey board’s younger and much more powerful sibling, the HiKey960, is still actively supported in AOSP. While recent trade disputes have prevented us from collaborating directly with HiSilicon, Linaro and other members continue to use the board for testing and development. The upstreaming effort on HiKey960 has always been a sore spot, but efforts have slowly continued, with recent changes landing upstream to prep for HiKey960’s display driver support, and extending the dwc3 driver to support HiKey960. But we still have yet to upstream the onboard USB hub/mux support, display driver, and i2s audio.
+
+![class=medium-inline hikey-image2](/linaro-website/images/blog/hikey-image2)
+
+With AOSP’s android-5.4 kernel, we have also taken a new direction with how we manage kernel support for HiKey960. Previously, we always kept a separate vendor kernel branch, which was based on the common/android-x.y kernel. We added whatever fixes or even hacks necessary to support the hardware, and tried to regularly merge in updates from the common/android-x.y branch. Unfortunately, this maintenance sometimes fell by the wayside, and updates became not so regular. But with android-5.4, instead of keeping a separate vendor branch, we’ve added the patches needed to support HiKey960 directly to the common/android-5.4 branch (an approach we’re also taking with the Dragonboard 845c).
This means we have to be careful, as this branch will be widely shared between all Android devices that use the 5.4 kernel - hacks to support just one board won’t do. Luckily, as much of HiKey960’s support is already upstream, there wasn’t too much to add. This greatly simplifies things for testing, since we now don’t have a separate tree that we have to maintain and update. Instead we can focus our testing directly on the latest version of the android-5.4 tree, which lets us ensure that the code we’re testing for regressions is exactly the same as what vendors will be picking up for their devices!
+
+Additionally, by being in the android-5.4 tree directly, we are making sure the android-5.4 kernel build for HiKey960 is GKI compliant, using modules for all the board-specific hardware support. HiKey960 (along with HiKey and Dragonboard 845c) was a crucial platform in early proof-of-concept work of the GKI ([demoed at the SAN19 Connect](https://twitter.com/johnstultz_work/status/1171915205548183553)). This is important, as one cannot really call something generic until it has been useful on more than one platform, and having multiple devices from different SoC vendors has been an important test point in creating a truly vendor-neutral approach.
+
+The HiKey960 continues the AOSP dev board tradition of allowing us to create an overlap between the AOSP community and the upstream kernel community. We still regularly use it for testing every upstream Linux -rc release against AOSP/master, which catches regressions early so they don’t make it into a release. And we’ve also used it to validate Android-focused changes that we want to upstream. As development priority moves to newer boards (like the Dragonboard 845c), the HiKey960, which is the only AOSP dev board to currently support Vulkan graphics, will continue to be a very useful test device going forward.
diff --git a/src/content/blogs/upstream-camera-support-for-qualcomm-platforms.mdx b/src/content/blogs/upstream-camera-support-for-qualcomm-platforms.mdx
new file mode 100644
index 0000000..6df0d26
--- /dev/null
+++ b/src/content/blogs/upstream-camera-support-for-qualcomm-platforms.mdx
@@ -0,0 +1,79 @@
+---
+title: Upstream camera support for Qualcomm platforms
+description: In this article, Robert Foss takes a detailed look at upstream
+  camera support for Qualcomm platforms. Read about his findings here!
+date: 2021-02-23T01:49:15.000Z
+image: linaro-website/images/blog/code-background_1
+tags:
+  - linux-kernel
+  - open-source
+author: robert-foss
+related: []
+
+---
+
+The CAMSS driver for Qualcomm® Image Signal Processors (ISPs) isn't new, but now has support for the next generation of ISP architecture.
+
+Linaro has been working together with Qualcomm to enable camera support on their platforms since 2017. The Open Source CAMSS driver was written to support the ISP IP-block with the same name that is present on Qualcomm SoCs coming from the smartphone space.
+
+The first development board targeted by this work was the DragonBoard™ 410C, which was followed in 2018 by DragonBoard 820C support. Recently, support for the Snapdragon™ 660 SoC was added to the driver, which will be part of the v5.11 Linux Kernel release. These SoCs all contain the CAMSS (Camera SubSystem) version of the ISP architecture.
+
+Currently, support for the ISP found in the Snapdragon 845 SoC and the DragonBoard 845C is in the process of being upstreamed to the mailing lists.
Having seen major changes, the ISP is no longer referred to as CAMSS, but is instead known as Titan. + +The Titan architecture offers improvements in resolution, framerates and most other dimensions of the ISP, and is the latest architecture shipped on modern Qualcomm chipsets. + +# Overview + +CAMSS is a V4L2 (Video for Linux 2) driver which focuses on supporting the basic use cases of the ISP, such as receiving the [MIPI CSI-2](https://www.mipi.org/specifications/csi-2) (Camera Serial Interface) physical signals from the sensors, decoding them, and then writing them to memory. This leaves a lot of functionality typically provided by an ISP unimplemented, but that is intentional as the development priority has been to enable the data path from camera sensor to userspace. + +The sub-components that the CAMSS drivers supports are the following: + +* CSIPHY - the CSI PHYsical layer manages the physical electrical signals sent by camera sensors. +* CSID - the CSI Decoder decodes the CSI-2 encoded data transmitted by the sensors +* VFE - the Video Front End formats received data and exposes it through to further hardware blocks. The VFE block was renamed to IFE (Image Front End) in the Titan architecture. +* PIX - the PIXel interface is exposed by the VFE, and is used to transmit data prepared for advanced processing by more specialized hardware blocks. +* RDI - the Raw Dump Interface is exposed by the VFE, and is used to write the raw decoded CSI output directly to memory. +* ISPIF - the Image Signal Processor InterFace ties together a lot of specialized hardware blocks into a data pipeline which can provide various levels of additional processing. + +## Qualcomm ISP Generation 1 - CAMSS + +The first generation of the ISP hardware block supported by the (aptly named) CAMSS driver is called CAMSS by Qualcomm. + +![Qualcomm ISP Generation 1, CAMSS Diagram](/linaro-website/images/blog/qualcomm-isp-generation-1-camss) + +The CSIPHY, CSID & VFE IP-blocks are relatively fully featured, but some functionality like Virtual Channels are not implemented. However, a more substantial limitation of the ISPIF support is that only basic cropping and rotation is currently implemented. + +An obstacle caused by this limited ISPIF functionality is that it’s not able to do format conversions using the ISP. So whatever format the camera sensor is outputting is directly output to userspace. This can be a problem for Bayer pixel format sensors, because they're poorly supported by userspace applications and require at least debayering to be done in a post-processing step before the output can be viewed. + +### Qualcomm ISP Generation 2 - Titan + +The next iteration of the Qualcomm ISP architecture is called Titan. The changes from Gen1 to Gen2/Titan can be summarized in two parts. + +![Qualcomm ISP Generation 2, Titan Diagram](/linaro-website/images/blog/qualcomm-isp-generation-2-titan) + +The Titan frontend blocks (CSIPHY, CSID & VFE) are almost identical between Gen1 and Gen2 with only minor structural changes and improvements. + +However, the image processing pipeline has been revamped, and the ISPIF no longer exists. What replaces it is an embedded CPU which is fed commands and in turn configures the data processing blocks. Adding support for this CPU is beyond the current scope of CAMSS due to the amount of work it would take to enable and the lack of documentation for the CPU command stream. However, userspace applications are able to manage some of this post processing. 
This is enough to support the most basic use cases, but many will be CPU/GPU intensive and likely not have the same quality as an ISP based implementation would. + +#### Future + +Linaro will continue to maintain this driver, and is likely to extend it to support additional hardware platforms. However, contributions are very welcome and platforms like the SDM630 and SDM660 have already had support contributed to the CAMSS driver. + +Currently [libcamera](https://libcamera.org/index.html) is a very useful development tool for working with CAMSS. It is more flexible than most V4L2 applications, and using the [libcamera/qcam](https://libcamera.org/getting-started.html) application makes it possible to view live output of even Bayer camera sensors without the ISP doing any debayering. + +Recent Linaro contributions towards libcamera have enabled GPU accelerated format conversion and debayering for the [libcamera/qcam](https://libcamera.org/getting-started.html) test application, you can read more about it [here](https://www.linaro.org/blog/accelerating-libcamera-qcam-format-conversion-using-opengl-shaders/). + +Other Linaro contributions to the Linux camera landscape include work in both user space with libcamera and in the kernel with camera sensor drivers, ISP drivers and V4L2 API work. Linaro has also contributed the Open Embedded libcamera recipe for developers building their products with Yocto Project based Linux distributions. + +Further developments regarding software and GPU debayering are expected for libcamera, and hopefully the community will see libcamera debayering enabled for all of its data paths soon enough. + +##### Acknowledgments + +A lot of different contributors enabled this work, both directly and indirectly. + +* Todor Tomov for creating the camss driver. +* [Jonathan Marek ](https://gitlab.freedesktop.org/flto)for trail-blazing Titan driver work. +* [Andrey Konovalov ](https://github.com/andrey-konovalov)for testing, finding bugs & being a great sounding board. +* Qualcomm for sponsoring this work. + +For more information about Linaro and the work we do, [contact us here](https://www.linaro.org/contact/). diff --git a/src/content/blogs/upstream-linux-support-now-available-for-the-the-qualcomm-snapdragon-8-gen-2-mobile-platform.mdx b/src/content/blogs/upstream-linux-support-now-available-for-the-the-qualcomm-snapdragon-8-gen-2-mobile-platform.mdx new file mode 100644 index 0000000..5d767f9 --- /dev/null +++ b/src/content/blogs/upstream-linux-support-now-available-for-the-the-qualcomm-snapdragon-8-gen-2-mobile-platform.mdx @@ -0,0 +1,103 @@ +--- +title: Upstream Linux support now available for the Snapdragon 8 Gen 2 Mobile Platform +description: "In this blog, we look at what features Linaro has upstreamed for + the Snapdragon 8 Gen 2 Mobile Platform and how to run an AOSP image using + Mainline. " +date: 2022-11-22T10:29:17.000Z +image: linaro-website/images/blog/Tech_Background +tags: + - android +author: neil-armstrong +related: [] + +--- + +Linaro Engineers Abel Vesa and Neil Armstrong enabled upstream Linux on the [recently announced](https://www.qualcomm.com/news/releases/2022/11/snapdragon-8-gen-2-defines-a-new-standard-for-premium-smartphone) Snapdragon 8 Gen 2 Mobile Platform, the newest Snapdragon processor. The initial support was posted on November 16th on the Linux kernel mailing lists for review by the Linux developers community. With the set of patches released by Linaro engineers, it is also possible to boot an AOSP mini image. 
Since 2014, Linaro Engineers have been working closely with Qualcomm Engineers to enable Snapdragon platforms to work with Mainline Linux. + +This is a significant achievement to be able to run such a recent upstream Linux kernel right after the announcement of a new SoC, and a testimony to the close working partnership between Qualcomm and Linaro. + +# What has been upstreamed for the Snapdragon 8 Gen 2 Mobile Platform? + +With the recent series of patches released by Linaro, the following features are enabled for the Snapdragon 8 Gen 2 Mobile Platform: + +* Qualcomm® Kryo™ CPUs, including DVFS and Power Control +* System foundation: Clocks, Power controllers, PMICs +* Low-Speed I/O: I2C, SPI +* High-Density Storage: UFS, SDXC +* High-Speed Peripherals: PCIe Gen3 and Gen4, USB SuperSpeed +* Qualcomm® Hexagon™ Processor SubSystems: Audio, Sensors, Compute and Modem + +All patches sent for review are also integrated and available in the following [development branch on CodeLinaro.org](https://git.codelinaro.org/linaro/qcomlt/linux/-/tree/topic/sm8550/next-20221115-aosp). + +![Snapdragon Development Kit](/linaro-website/images/blog/snapdragon-development-kit) + +## How do I run AOSP using Mainline? + +One might think it is quite hard to run AOSP with mainline on such a new platform, but in reality, not at all! Thanks to the long term effort of Linaro and Google engineers making it possible to run AOSP with vanilla Linux releases. +To generate an AOSP image for the Snapdragon 8 Gen 2 development kit using the current set of patches available on the mailing list, use the following instructions, which are derived from here [https://source.android.com/docs/setup/build/devices](https://source.android.com/docs/setup/create/devices) with some small changes. + +Download the Android source tree: + +``` +$ mkdir AOSP +$ cd AOSP +$ AOSP=$PWD +$ repo init -u https://android.googlesource.com/platform/manifest -b master +$ repo sync -j`nproc` +``` + +Prepare SM8550 device config by simply re-using the SM8450 one and disabling MMC inline encryption: + +``` +$ cd device/linaro/dragonboard +$ find . \( -type d -name .git -prune \) -o -type f -print0 | xargs -0 sed -i 's/sm8450/sm8550/g' +$ sed -i "s/,inlinecrypt//" fstab.common +$ sed -i "s/fileencryption.*_encryption,//" fstab.common +$ mv sm8450_mini.mk sm8550_mini.mk +$ mv sm8450/ sm8550/ +$ sed -i 's/sm8550-qrd/sm8550-mtp/g' sm8550/device.mk +``` + +Build the Linaro SM8550 tree containing the patches sent for review: + +``` +$ cd $AOSP +$ git clone https://git.codelinaro.org/linaro/qcomlt/linux.git \ + -b topic/sm8550/next-20221115-aosp sm8550-kernel +$ cd sm8550-kernel +$ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- \ + sm8550_aosp_defconfig +$ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -j`nproc` +$ mkdir $AOSP/device/linaro/dragonboard-kernel/android-sm8550/ +$ cp arch/arm64/boot/Image.gz arch/arm64/boot/dts/qcom/sm8550-mtp.dtb \ + $AOSP/device/linaro/dragonboard-kernel/android-sm8550/ +``` + +Build AOSP + +``` +$ cd $AOSP +$ . build/envsetup.sh +$ lunch sm8550_mini-userdebug +$ make TARGET_KERNEL_USE=sm8550 -j`nproc` +``` + +Flash AOSP Images + +``` +$ cd out/target/product/sm8550/ +$ fastboot flash super ./super.img flash boot ./boot.img \ + flash userdata ./userdata.img reboot +``` + +## Next steps + +In the coming weeks, Linaro engineers will continue to work with the Linux kernel community to ensure all the patch series are merged in a timely manner. Additional patches are expected soon to enable display, audio and modem use cases. 
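+
+Going back to the flashing steps above, once the board reboots a quick sanity check over adb can confirm that the freshly built kernel is what is actually running. These are standard Android platform tools commands and the paths and expected strings are assumptions for illustration; the exact output will vary, and the userdebug build is what allows `adb root`:
+
+```
+$ adb wait-for-device
+$ adb root
+$ adb shell uname -a                      # should report the kernel built from the sm8550 branch above
+$ adb shell cat /proc/device-tree/model   # should identify the SM8550 MTP board described by sm8550-mtp.dtb
+```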
+
+## Want to learn more?
+
+To find out more information on the ongoing work, check [https://lore.kernel.org/all/?q=SM8550](https://lore.kernel.org/all/?q=SM8550).
+
+The Snapdragon 8 Gen 2 specification & features can be found here [https://www.qualcomm.com/content/dam/qcomm-martech/dm-assets/documents/Snapdragon-8-Gen-2-Product-Brief.pdf](https://www.qualcomm.com/content/dam/qcomm-martech/dm-assets/documents/Snapdragon-8-Gen-2-Product-Brief.pdf).
+
+For more information about what Qualcomm platform services Linaro offers and how we can help develop, maintain and optimize products using Qualcomm technologies, go to [https://www.linaro.org/services/qualcomm-platforms-services/](https://www.linaro.org/services/qualcomm-platforms-services/).
diff --git a/src/content/blogs/upstreaming-support-for-qualcomm-pcie-modems.mdx b/src/content/blogs/upstreaming-support-for-qualcomm-pcie-modems.mdx
new file mode 100644
index 0000000..5dc0b74
--- /dev/null
+++ b/src/content/blogs/upstreaming-support-for-qualcomm-pcie-modems.mdx
@@ -0,0 +1,116 @@
+---
+title: Upstreaming Support for Qualcomm PCIe Modems
+description: >
+  In this article, Loic Poulain takes a detailed look at upstreaming support for
+  Qualcomm PCIe modems. Read about his findings here!
+date: 2021-06-01T03:35:36.000Z
+image: linaro-website/images/blog/technology-3389917_1920-1-
+tags:
+  - linux-kernel
+  - open-source
+author: loic-poulain
+related: []
+
+---
+
+# Introduction
+
+**Wireless Wide Area Network (WWAN)** is a form of wireless network that relies on telecommunication technologies such as 3G or 4G cellular networks for transferring data, specifically IP packets, and thus offering internet access over mobile networks. From the user side, such a network is accessed via a ‘modem’ implementing one or several of the cellular protocols.
+
+The arrival of fifth-generation mobile networks, known as 5G, promises an even more connected world, with billions of devices, from smartphones through connected vehicles to tiny IoT gadgets. With Linux being the major OS in the embedded world, Linux support for WWAN modems is obviously a strategic topic.
+
+Qualcomm manufactures cellular modems such as the Snapdragon X24 (LTE) or X55 (5G) and these are integrated into various OEM WWAN modules. In the last 18 months, Linaro, in collaboration with multiple OEM modem manufacturers and with Qualcomm, has worked on upstreaming PCIe-based Qualcomm 4G/5G modem support in Linux. With further changes due in the Linux 5.13 release, using a Qualcomm PCIe modem just got a lot easier.
+
+# Linux WWAN support for USB modems
+
+For some time now, USB has been the de-facto solution for connecting WWAN modems. WWAN support in Linux has therefore been mostly driven by the integration of USB-based modems.
+
+Unlike other wireless technologies like WiFi or Bluetooth, the Linux kernel does not offer a unified high level API and device model for WWAN modems. A USB WWAN device is usually enumerated as a set of multiple logical devices, such as:
+
+* TTY serial devices (/dev/ttyUSB\*, /dev/ttyACM\*) transporting ‘legacy’ AT commands and data via point-to-point protocol (PPP).
+* cdc-wdm character devices (/dev/cdc-wdm\*) to transport modern binary-based control protocols such as USB-IF MBIM (Mobile Broadband Interface Model) or QMI (Qualcomm Modem/MSM Interface).
+* Network devices (e.g.
wwan0 iface) used to transport data through USB interfaces optimized for network packet transfer, and implemented as CDC-ECM, CDC-NCM, RNDIS or CDC-MBIM USB classes, +* Virtual CD-ROM, usually hosting Windows/MacOS drivers and user manual (e.g. /dev/sr1 block device)… + +Though all the logical devices contribute to the WWAN/feature as a whole, they are each registered separately. This collection of devices varies depending on the manufacturers and models, and is possibly extended with additional interfaces for debug, firmware upgrade, GPS/GNSS and so on. + +Below is the kernel log output on Telit FN980 USB modem connection, it shows several devices being registered: + +![Kernel log output on telit fn980 usb modem connection](/linaro-website/images/blog/kernel-log-output-on-telit-fn980-usb-modem-connection) + +This heterogeneous and relatively raw interfacing scheme does not make modems straightforward to use from the user side. For example, the wwan0 network interface is not useful alone and requires configuration using specific commands from one of the control ports (e.g. cdc-wdm0) to pass traffic. Thankfully, some userspace tools have been developed to handle that complexity, such as ModemManager, which + +* Identifies which logical devices (tty, net, cdc-wdm…) must be collected together to expose a consolidated view of the ‘WWAN device’ to the user. +* Abstracts control protocols such as AT, QMI, MBIM to offer a high level unified control interface over DBUS (e.g. enable, connect, scan…). + +To accomplish this, ModemManager relies on protocol libraries (libqmi, libmbim), sysfs hierarchy, uevents and vendor plugins. + +# Qualcomm PCIe modems + +Increasingly laptop manufacturers and industrial OEMs are adopting modem designs based on PCIe. For the same generation, PCIe offers higher speed, lower latency and lower power consumption than USB equivalent, making it perfectly suitable for 5G high speed requirements (up to 20Gbps). Unfortunately, PCIe modems are also known as non working under Linux, suffering lack of proper drivers and infrastructure. + +PCI differs from USB because PCI devices do not offer high level operations and concepts such as USB transfers, sub-devices and endpoints/pipes (bulk, interrupt, control). Instead, PCI drivers are built on top of low level operations such as memory-mapped I/O and DMA (direct memory access) transfers, making them generally more complex. + +To provide something similar to USB interfaces and endpoints, Qualcomm created the **modem-host interface (MHI)**, which can be used by a host to communicate with any PCIe modem implementing this interface. MHI devices are able to expose multiple features and protocols over a set of predefined **channels**. Internally MHI is based on shared memory and ring buffers and defines device states, transfer procedures, channels, low power modes, etc. With this solution, Qualcomm modems can therefore be easily tuned to route the higher level data and control protocols (IP, AT, MBIM…) over either USB or PCIe/MHI transport buses. + +# The Linux MHI stack + +As already described by my colleague, Manivannan Sadhasivam, [the MHI core stack landed in Linux in 2020 (drivers/bus/mhi)](https://www.linaro.org/blog/mhi-bus-support-gets-added-to-the-linux-kernel/). 
Without getting into too much detail, MHI has been implemented as a standard **Linux bus**, where each physical device is registered as a ‘MHI bus controller’ and on which the logical channels are exposed as logical ‘MHI devices’ that are in turn bound to ‘MHI client drivers’.
+
+![The Linux MHI Stack](/linaro-website/images/blog/the-linux-mhi-stack)
+
+Interestingly, the first user of this stack was the ath11k PCI WiFi driver. Although the M in MHI stands for ‘Modem’, MHI only defines the infrastructure for communicating with logical devices and not which features or protocols must be exposed by these devices.
+
+With the MHI bus supported in mainline, adding support for Qualcomm modems mostly consisted of implementing the missing lower (bus) and upper (functions) layers, respectively the **PCI MHI controller driver** and the **MHI client drivers**.
+
+# Adding support for PCIe MHI modems - mhi\_pci\_generic
+
+We started with the PCI MHI controller driver, and implemented it as a generic PCI driver compatible with all Qualcomm PCIe/MHI modems. This driver, mhi\_pci\_generic, is a tiny piece of code that essentially retrieves and prepares the PCI device resources (interrupts, memory mappings…) before registering a new MHI controller (e.g. mhi0). Once registered, the MHI core handles all the MHI operations and the PCI driver only acts as a physical bus abstraction layer for register accesses or low power transitions.
+
+We were pleased to find that no changes, except for a few bug fixes, were needed in the MHI core. Once the mhi\_pci\_generic driver had registered with the MHI core we were able to see the discovered channels and transfer our attention to drivers for the logical devices.
+
+For example, a Telit FN980 5G PCIe device exposes the following MHI TX/RX channels:
+
+* IP\_HW0: the path for network data, which is handled on the modem side by the IPA (IP hardware accelerator).
+* QMI: a protocol for controlling the modem that is exactly the same as for the USB variant but is instead routed over PCIe/MHI.
+* DIAG: the modem diagnostic interface (also known as QCDM).
+
+![MHI Core](/linaro-website/images/blog/mhi-core)
+
+As with any other bus, MHI devices (controllers, clients) are represented under the sysfs hierarchy:
+$ ls /sys/bus/mhi/devices
+mhi0 mhi0\_DIAG mhi0\_IP\_HW0 mhi0\_QMI
+
+# MHI WWAN network driver - mhi\_net
+
+The IP\_HW0 device represents the data path and is a logical link to the Modem IP accelerator (IPA), and by extension to the cellular network. We implemented a new **netdev driver**, mhi\_net, to perform the bridging between the MHI layer (MHI transfers) and the Linux network stack (IP packets).
+
+![MHI WWAN Network Driver MHI Net](/linaro-website/images/blog/mhi-wwan-network-driver-mhi_net)
+
+# MHI WWAN control driver - mhi\_wwan\_ctrl
+
+For the control/debug channels (QMI and DIAG), we decided to expose them in a similar way to the cdc-wdm driver for USB modems, that is, exposed raw to userspace via character devices. This way, adding MHI support to user tools already supporting USB modems would be as simple as changing a device name.
+
+Initially, we ported the mhi\_uci driver from the downstream MHI stack, which is a simple shim character driver converting device file read/write to MHI transfers, thus allowing raw MHI bus access to user space.
However this driver never found its way to the Linux mainline due to concerns it would become a ‘generic backdoor interface’ for ‘everything qualcomm’, bypassing the usual kernel abstraction interfaces to transfer any opaque/vendor protocols. Maintainers were also reluctant to have yet another bus specific chardev for QMI (and MBIM…), and expressed their willingness to have more standardisation and unification for WWAN/modems in the kernel. + +After some LKML back and forth to refine things, we migrated to a better solution by splitting our work into two parts. The first of these was a generic new [WWAN subsystem](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/net/wwan/wwan_core.c), which is a hardware agnostic framework managing and exposing WWAN devices and their control ports. The second is a MHI specific WWAN port driver, [mhi\_wwan\_ctrl](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/net/wwan/mhi_wwan_ctrl.c), which registers to the newly created WWAN subsystem and performs the MHI bus adaptation. This way, the WWAN framework can be used with any further WWAN drivers. + +![MHI WWAN Control Driver MHI WWAN Cntrl](/linaro-website/images/blog/mhi-wwan-control-driver-mhi_wwan_ctrl) + +# WWAN made easy + +Linux 5.13 will be the first release including all the changes required to support any SDX55 or SDX24 based modem. It has been successfully tested with Telit FN980m and Quectel EM120GR-L modules, but more are coming and we already see other vendors adding their PCI IDs. + +To support this in userspace we also added WWAN/MHI support to ModemManager. This is currently only available in the development branches but we anticipate it being included in the ModemManager 1.18. + +With all those pieces, using a QCOM PCIe modem is as easy as with ethernet or WiFi networks. NetworkManager, either through command line utility (nmcli) or the graphical network settings, can be used to manage the ‘GSM’ connection: + +![The GSM Connection](/linaro-website/images/blog/the-gsm-connection) + +For WWAN, all you need is the APN, and optionally a pin to unlock the sim card. + +![Instructions on how to use WWAN](/linaro-website/images/blog/instructions-on-how-to-use-wwan) + +Enjoy! + +For more information on Linaro and the work that we do, [contact us here](https://www.linaro.org/contact/). diff --git a/src/content/blogs/using-energy-model-to-stay-in-tdp-budget.mdx b/src/content/blogs/using-energy-model-to-stay-in-tdp-budget.mdx new file mode 100644 index 0000000..0607283 --- /dev/null +++ b/src/content/blogs/using-energy-model-to-stay-in-tdp-budget.mdx @@ -0,0 +1,106 @@ +--- +title: Power Consumption of Embedded Devices +description: In this intriguing article, Daniel Lezcano looks into the power + consumption complexities of embedded devices and the challenges this creates. + Read more here! +date: 2020-07-29T10:41:16.000Z +image: linaro-website/images/blog/code-background_1 +tags: [] +related_projects: + - PERF +author: daniel-lezcano +related: [] + +--- + +# Introduction + +An ever-increasing number of embedded devices need fine grain control on their performance in order to limit the power consumption. There are three primary reasons for this: to increase the battery life, to protect the components and to control the temperature. 
+ +Due to the increasing complexity of SoCs, we're now seeing lots of thermal sensors on the die to quickly detect hot spots and allow the OS to take steps to mitigate these events - either through better scheduling, frequency throttling, idle injection or other similar techniques. + +The performance states of a device usually follow a quadratic curve in terms of SoC power consumption which explains why it can have a very significant impact on the system. + +The power management is done from the kernel side with different frameworks: the cpufreq automatically adapts to the performance state via the operating points, depending on the system load, the thermal framework which monitors the components temperature and caps their performances in case of a hotspot detection. There are more techniques but, for the sake of simplicity, we won't mention them in this blog. + +Mobile devices are even more interested in managing power consumption because, depending upon the situation or the workload, the performance places higher or lower priority on certain components in regards to others. One example is virtual reality where a hotspot on the graphics can lead to a performance throttling on the GPU resulting in frame drops and a dizziness feeling for the user. Another example is the ratio between the cost in energy for a specific performance state vs a benefit not noticeable for the user, like saving milliseconds when rendering a web page. And last but not least, a battery low situation where we want to guarantee a longer duration before shutdown can create a unique prioritization scheme. + +This non-exhaustive list of examples shows there is a need to act dynamically on the devices' power from the userspace who has full knowledge of the running application. In order to catch unique scenarios and tune the system at runtime, the solution today leverages a thermal daemon monitoring the temperature of different devices and trying to anticipate where to reduce the power consumption, given the application is running. The thermal daemon turns the different “knobs” here and there, in every place where it is possible to act on the power. One of these places is the thermal framework which exports an API via sysfs to manually set the level of the performance state for a given device declared as a passive cooling device. + +Unfortunately, the thermal framework was not designed for that, as its primary goal is to protect the component at the limits. Thus the thermal daemon and the in-kernel governor will compete in their decisions. Moreover, some governors are open loop regulation systems where they make a connection between the state they choose and the cooling effect. If thermal daemons changes the decision, the connection is broken and the governor's logic can enter unknown states. + +This quick overview shows there is no unified framework to deal with power constraints on the system other than experimentation within the kernel and being opportunistic with the thermal framework cracks. + +## Related work + +The Intel processors perform power limitations on their CPUs via a specific register called Running Average Power Limit (RAPL). Over the years, the RAPL evolved to support more power zones such as the memory and the Psys (graphics, PCH, L3 cache). The latter controls the entire SoC power system. 
+ +The Linux kernel provides a generic framework called 'powercap' which was introduced in 2013 and where it defines a set of sysfs APIs to limit the power on a specific device and to read the current power consumption levels. It is up to a subsystem to provide the backend driver to implement the different callbacks of this framework. + +Given the nature of the powercap architecture which describes the powercap zones in the sysfs directories, it is possible to model a hierarchy of power constraints. + +The Intel RAPL backend driver was introduced right after the powercap framework and allows the userspace to limit the power on the devices as well as reading their current consumption. + +The RAPL driver is the only backend making use of the powercap. + +In parallel with the introduction of the ARM big.LITTLE architecture, the scheduler needed the CPUs power information in order to make power aware decisions. This is when the energy model originated, providing the CPU's power information for each performance state. + +In addition, the Intelligent Power Allocator, a power aware thermal governor, used the power number of the CPUs and the GPUs introduced differently from the device tree file. + +A consolidation of the power aware cooling devices has been made to use the energy model instead of duplicating the code and that led to the generalization of the energy model to the devices. So it is potentially available on all the devices handled by the Linux kernel if their driver implements the callbacks to return the performance states and their power consumption. + +## A new powercap backend - energy model based + +As described previously, the mobile devices want to balance the power along with the components on the SoC depending on the kind of load or situation the system is facing: managing the power as a whole keeps the system inside its thermal envelope. + +This is where the energy model based powercap fits perfectly: + +* The sysfs hierarchy allows to model the constraints of the different devices on the SoC +* The energy model gives the power information of each device +* The performance state callbacks allow an application to limit the power +* The hierarchy allows an application to propagate the constraints on the different tree nodes and rebalance the free power along the child nodes +* The powercap framework offers a single place to act on the device power, allowing a consistent and unified API + +The hierarchy of the constraints is represented by a tree via the sysfs filesystem. The nodes of the tree are virtual and their purpose is to aggregate the power information from the child nodes: the power consumption is the sum of the child nodes power consumption. This also applies to the max and min power. The leaves of the tree are the real devices grouped per performance domain. If a power limit is set on a node, then the power limit is split proportionally to the children regarding their max power consumption. This power limit distribution to the child nodes is considered fair enough for most of the system using this approach like electricity grid in data centers. + +The powercap energy model can be under full userspace control where all the devices are power limited manually and individually by the userspace or, alternatively, the in-kernel logic can balance the power along the children nodes if there is free power remaining from devices with a power usage lesser than the limit. A mix of both is also possible, by setting power limits at different levels. 
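+
+To make the proposal more concrete, the sketch below shows what such a hierarchy could look like through the generic powercap sysfs attributes (name, constraint\_0\_power\_limit\_uw are part of the existing powercap framework; the "energy-model" zone names, the layout and the wattage values are hypothetical, since only the intel-rapl backend exists in the tree today and the prototype may choose different naming):
+
+```
+$ ls /sys/class/powercap/
+energy-model:0  energy-model:0:0  energy-model:0:1  intel-rapl:0  ...
+$ cat /sys/class/powercap/energy-model:0/name        # hypothetical virtual root node aggregating the SoC
+# Cap the whole SoC to 3 W; the limit is split proportionally across the child nodes
+$ echo 3000000 > /sys/class/powercap/energy-model:0/constraint_0_power_limit_uw
+# Or cap a single performance domain directly and leave the rest to the in-kernel logic
+$ echo 1000000 > /sys/class/powercap/energy-model:0:1/constraint_0_power_limit_uw
+```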
+ +As per choice, if a SoC vendor wants to manage individually the power of the devices on its system without having the kernel being involved in the power decisions, they can create a flat hierarchy where all nodes are leaves. + +If the SoC vendor wants to let the kernel manage all the power, it can set a power limit constraint at the root of the hierarchy. + +Finally, if the SoC vendor wants to manage a group of devices they can create subtrees where the power limit constraints at the intermediate node are set and let the kernel manage the power of the children nodes from there. + +## Status + +At the time of this writing, a first prototype was submitted for comments and review. This first draft puts a simple hierarchy with the CPU's performance domains. It shows how the powercap can be used to act on its performance states without conflicting with the kernel decisions. The performance state selection is done through the frequency QoS which is used by the other kernel subsystems and guarantees the aggregation of the requests in a sane way. + +[https://lkml.org/lkml/2020/7/7/1220](https://lkml.org/lkml/2020/7/7/1220) + +The in-kernel logic has been implemented in userspace to validate the automatic power balancing along the nodes and showed it works accurately even if there are tricky aspects regarding the integer precision, but nothing unsolvable. + +The code is available at [https://git.linaro.org/people/daniel.lezcano/powerem.git](https://git.linaro.org/people/daniel.lezcano/powerem.git) (22 July 2020) + +The algorithm has been presented at ELC 2020. See [here](https://ossna2020.sched.com/event/c3Wf/ideas-for-finer-grained-control-over-your-heat-budget-amit-kucheria-daniel-lezcano-linaro) + +## Future work + +Work remains to update the energy model to be generalized to support more devices: for instance the LCD brightness, battery charging mode, memory frequencies, GPU, DSP must be mapped to power numbers via the energy model. Usually the SoC vendors are reluctant to share this information but the algorithm can work if the power numbers are normalized. + +Another area of additional work is the power meter where we can genuinely estimate the power consumption given the device usage and its performance state. Obviously, a 50% loaded CPU will consume half of the power than a 100% loaded CPU at the same performance level. That will involve some mathematical and signal tracking. + +## Conclusion + +The powercap energy model based framework will need a lot of development where some efforts are technically challenging. As we are in the kernel we are restricted in terms of resources and the algorithm for the power allocation and distribution must be efficient and optimized to maintain consistent power consumption regarding the power limits. + +It is reasonable to say the in-kernel logic will greatly increase the efficiency of the power distribution as it can synchronously get the performance changes of the devices and adapt the allocated power budget. An operation where the userspace has to constantly poll the temperature to adapt the performances, a laggy implementation prone to more power consumption as it becomes a source of wakeup but an inevitable solution as no framework is available. + +The powercap energy model based framework will be a very powerful framework, flexible for userspace, unified for AOSP, consistent and safe to coexist with the existing frameworks. 
+ +## [About the Kernel Working Group](/core-technologies/toolchain/) + +The Kernel Working Group’s (KWG) primary focus is to be an active contributor to the upstream community and facilitate acceptance of our code into the Linux mainline kernel. Our goal is kernel consolidation - a single source tree with integrated support for multiple Arm SoCs and Arm-based platforms. + +## About Linaro + +Linaro is a Member-based company focused on the de-fragmentation of the Arm software Open Source ecosystem. Linaro also supports the Arm ecosystem through customized services, training, and support. We would love to hear from you and see how we can help you with any Arm-based support, so please feel free to reach out to set up a sync at [https://www.linaro.org/contact/](https://www.linaro.org/contact/). diff --git a/src/content/blogs/using-the-arm-statistical-profiling-extension-to-detect-false-cache-line-sharing.mdx b/src/content/blogs/using-the-arm-statistical-profiling-extension-to-detect-false-cache-line-sharing.mdx new file mode 100644 index 0000000..4ecc340 --- /dev/null +++ b/src/content/blogs/using-the-arm-statistical-profiling-extension-to-detect-false-cache-line-sharing.mdx @@ -0,0 +1,90 @@ +--- +title: Using the Arm Statistical Profiling Extension to detect false cache-line + sharing +description: This article talks about how to use perf c2c tool with Arm Neoverse CPUs. +date: 2022-07-12T08:38:03.000Z +image: linaro-website/images/blog/code-background_1 +tags: + - arm +author: leo-yan +related: [] + +--- + +# Introduction + +Memory operations can introduce performance bottlenecks. In a non-uniform memory access (NUMA) environment, a typical case is if multiple items of data share the same cache line. Threads write to some items and read from the remaining items concurrently, and these threads reside on different CPUs and even across NUMA nodes - we call this “false sharing”. In this case, it’s expensive for the cache coherency operations and causes a significant performance penalty. + +Perf c2c tool observes cache-to-cache line contention and allows us to identify the places in the code that provokes these cache activities, therefore, the tool can teach developers to optimise data structures (e.g. using per CPU data, or use “aligned” attribute with compiler) for avoiding false sharing. We cannot detect false sharing issues purely in software, this is why perf c2c relies on underlying hardware mechanisms to inspect cache activity. x86 machines offer the hardware capability and this is already supported by the perf c2c tool. With the introduction of the Statistical Profiling Extensions (SPE) in Armv8.2 we can now provide similar analysis for Arm machines. + +SPE is a hardware tracing mechanism that can be used for memory profiling, furthermore, recent Arm cores including the CPUs of Neoverse family implement the data source packet in SPE to identify where data is originally from, which gives us an opportunity to enable perf c2c tool on Arm platforms. This article introduces how to use perf c2c tool with Arm Neoverse CPUs. + +# The implementation of perf c2c on x86 architecture + +First let’s review the implementation of perf c2c on x86 architecture. 
On x86 the command perf c2c record uses the events ‘mem-loads’ and ‘mem-stores’ to record the memory operations:
+
+```
+ /sys/devices/cpu/events/mem-loads
+ /sys/devices/cpu/events/mem-stores
+```
+
+Afterwards, we can use the perf c2c report command to output the shared cache line table; for every sorted cache line it shows the metrics “Total records”, “Total loads”, “Total stores”, etc, followed by more breakdown metrics for loads and stores.
+
+![Shared Data Cache Line Table](/linaro-website/images/blog/shared-data-cache-line-table)
+
+The question is: how do you identify cache lines that are subject to false sharing? A feature called Processor Event Based Sampling (PEBS) provides the data source for memory operations; an important concept, “HITM”, is introduced in PEBS. HITM stands for “Hit modified cache line”, which means a memory operation hits a modified copy from another processor’s cache line. The perf tool sets the snooping flag PERF\_MEM\_SNOOP\_HITM in the memory sample’s “data\_src” field for the HITM operations.
+
+Furthermore, by combining this with cache level info (e.g. L3 cache level or remote cache), a memory operation can be classified as either a local HITM or a remote HITM. Distinguishing the HITMs is very useful for locating NUMA performance issues, since a remote HITM indicates sharing that crosses memory nodes.
+
+As a result, the view below shows how false sharing happened within a cache line. HITMs are classified under the metric “RmtHitm” for remote HITMs and the metric “LclHitm” for local HITMs; these two metrics tell us that the modified cache is snooped from local peer CPUs and from remote nodes respectively, and from the store metrics we can know how the data items are modified in the program. This view is printed out by default in stdio mode; in text-based user interface (TUI) mode we need to select a cache line with Enter and then press key ‘d’ so the view can pop up.
+
+![Shared Data Cache Line Distribution Pareto Image](/linaro-website/images/blog/shared-cache-line-distribution-pareto-image)
+
+# Enabling Arm SPE in perf
+
+The Statistical Profiling Extension provides a different mechanism to trace cache activity. Instead of the 'mem-loads' and 'mem-stores' events used on x86, Arm SPE systems provide a mechanism to run a sampling profiler over memory operations. As an operation is sampled in SPE, its PC value, event type, latency, and physical and virtual data addresses for data accessing are recorded; finally, the sample is recorded as packets and saved into memory. Note that SPE is inherently a statistical profiler: rather than instrumenting every memory operation, users can specify the programmable interval for the recording, and even a pseudo-random jitter can be enabled for random intervals; the profiling result might lose some resolution, but we benefit from avoiding significant overhead.
+
+In order to integrate Arm SPE in the perf tool, we needed to be able to use Arm SPE as a source of memory events on Arm64. We extended the perf tool to do this, whilst also keeping support for traditional memory events found in other architectures (like x86, powerpc). This is accomplished with a weak function perf\_mem\_events\_\_ptr(). This function’s argument is a general event type (PERF\_MEM\_EVENTS\_\_LOAD, PERF\_MEM\_EVENTS\_\_STORE, or PERF\_MEM\_EVENTS\_\_LOAD\_STORE) and it returns the corresponding hardware performance monitoring unit (PMU) event. For the Arm64 arch, we redefined this function to return the Arm SPE PMU event as the memory events. Thus we can record the Arm SPE trace data.
Note, the recorded trace data is merely raw data, we can use the command perf script -D to dump the SPE raw data with packet wise: + +![Using perf to dump Arm SPE Raw Trace Data Image](/linaro-website/images/blog/using-perf-to-dump-arm-spe-raw-trace-data-image) + +During the decoding phase, we need to synthesise memory samples based on SPE raw trace data, and the memory samples at the end can be consumed by perf tool. A memory sample contains fields for instruction pointer (IP), virtual and physical data addresses, alongside with a field “data\_src”. At this stage of development there was enough for enabling perf mem, we can use perf mem to capture SPE trace data and output the statistics result based on synthesised memory samples on Arm64: + +``` + perf mem record -- test_program + perf mem report +``` + +The data source packet in SPE trace data has no unified format but is implementation dependent, therefore, without the data source format SPE leaves too many gaps to monitor cache-to-cache activity and becomes a hurdle to enable perf c2c… until Ali Saidi (Amazon) sent a patch series to support data sources for the Arm Neoverse family of cores. + +# Perf c2c tool extension for Arm SPE + +It was critical that Ali Saidi explored the data source format for Arm Neoverse cores; we can find the data source definition in the documentation “[Arm ® Neoverse™ N2 Core Technical Reference Manual](https://developer.arm.com/documentation/102099/0000/Statistical-Profiling-Extension-support/Statistical-Profiling-Extension-data-source-packet)”, section “22.2 Statistical Profiling Extension data source packet”. Arm Neoverse cores have four kinds of data source that indicate snooping is taking place from peer cores: Peer core (0b1001), Local cluster (0b1010), Peer cluster (0b1100) and Remote (0b1101). + +These four data source values can tell us that the peer snooping happens, this is not as rich as x86 HITM which is also able to report on data access from a modified copy in the peer cache line, but it is quite sufficient to identify cache-line sharing. After some discussion, we decided to introduce a new snooping flag: PERF\_MEM\_SNOOPX\_PEER to indicate that the data is fetched from peer cache lines. Connecting with code, the flag PERF\_MEM\_SNOOPX\_PEER would easily help us to locate false sharing cases. + +The rest of the work was to support the flag PERF\_MEM\_SNOOPX\_PEER in perf c2c, we need to let perf c2c tool sort cache lines based on this new flag. Similar to HITM tags, we treat the data sources “Peer core”, “Local cluster”, “Peer cluster” as local peer accesses, and “Remote” as remote peer accesses, the remote peer access accounting is expected to be helpful for NUMA performance analysis. + +As a result, I sent out [a patch series](https://lore.kernel.org/lkml/20220604042820.2270916-1-leo.yan@linaro.org/) containing Ali’s patches and perf c2c tool extension, which introduces a new display type “peer” for sorting shared cache lines with the SNOOPX\_PEER flag. It is the default display type on Arm64 platforms; with the changes perf c2c tool can parse false sharing on Arm Neoverse (N1/N2/V1) CPUs. We hope this patch series can be landed on the mainline kernel in the merge window for v5.20. + +# Reporting cache-to-cache activity on Arm64 + +Let’s demonstrate an example at the end, the command “perf c2c record” traces memory accessing with Arm SPE, and “perf c2c report” outputs results shown below. 
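+
+For readers who want something concrete to record, below is a minimal false-sharing sketch in C. It is a hypothetical toy, not the test case linked in the acknowledgements: two threads bump adjacent counters that land in the same 64-byte cache line, which is exactly the kind of access pattern that shows up as peer snooping in the report.
+
+```
+/* Minimal false-sharing sketch (illustrative only).
+ * Build with: gcc -O2 -pthread false_sharing.c */
+#include <pthread.h>
+#include <stdio.h>
+
+#define ITERS 100000000UL
+
+struct counters {
+	unsigned long a;   /* written by thread 1 */
+	unsigned long b;   /* written by thread 2 - shares a cache line with 'a' */
+	/* Fix: give each hot field its own cache line, e.g.
+	 *   unsigned long b __attribute__((aligned(64)));
+	 * and the peer-snoop counts for this line collapse. */
+};
+
+static struct counters shared;
+
+static void *bump_a(void *unused)
+{
+	for (unsigned long i = 0; i < ITERS; i++)
+		shared.a++;
+	return NULL;
+}
+
+static void *bump_b(void *unused)
+{
+	for (unsigned long i = 0; i < ITERS; i++)
+		shared.b++;
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t t1, t2;
+
+	pthread_create(&t1, NULL, bump_a, NULL);
+	pthread_create(&t2, NULL, bump_b, NULL);
+	pthread_join(t1, NULL);
+	pthread_join(t2, NULL);
+	printf("a=%lu b=%lu\n", shared.a, shared.b);
+	return 0;
+}
+```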
+
+“perf c2c report” supports four display types: three of them (“rmt”, “lcl” and “tot”) display with HITM tags, and the newly added “peer” type sorts cache lines with the SNOOPX\_PEER flag. A minor improvement to the tool is that it automatically selects an appropriate display type based on the session environment: it uses “peer” as the default type for reporting Arm64 perf data and “tot” for x86, so users don’t need to explicitly specify the display type.
+
+```
+ perf c2c record -- test_program
+ perf c2c report
+ perf c2c report -d peer (Explicitly specify display peer type)
+```
+
+The result outputs a new metric “Peer Snoop”, which shows that peer snooping happened on two cache lines with base addresses 0x420180 and 0x420100. If we then look into the details for every cache line, observing the two metrics “Peer Snoop Rmt” and “Peer Snoop Lcl”, we can easily conclude that the accesses to virtual addresses 0x400bd0 and 0x400c74 provoked a large amount of peer snooping, and we can then explore optimizations for the corresponding structures.
+
+![Perf c2c outputs with Arm SPE trace data image](/linaro-website/images/blog/perf-c2c-outputs-with-arm-spe-trace-data-image)
+
+# Acknowledgement
+
+Credits and thanks to Ali Saidi for enabling the data source on Arm Neoverse CPUs and for contributing the idea for the snooping flag. Thanks to Adam Li (Ampere), German Gomez (Arm) and James Clark (Arm) for continuous testing and reviewing. Joe Mario (Redhat)’s suggestions and feedback on extending perf c2c were very valuable and much appreciated! Finally, thanks to Arnaldo Carvalho de Melo (Redhat) for his background support, to Steven Miao (Arm) for pointing me to the documentation on how the AMBA bus topology maps to SPE data source encodings, and to Haojian Zhuang (Linaro) for sharing a HiSilicon D06 board.
+
+Enjoy the tool on your platform! If you are interested in perf c2c and are lucky enough to have an Arm Neoverse platform to hand, you are encouraged to try this new debugging feature with [the test case](https://github.com/joemario/perf-c2c-usage-files/blob/master/false_sharing_example.c) (applying the patches mentioned in this article on a mainline kernel). Patches to support perf c2c on new Arm CPU variants are welcome; you can refer to [the patch](https://lore.kernel.org/lkml/20220517020326.18580-6-alisaidi@amazon.com/) for how to do it.
diff --git a/src/content/blogs/virtio-work.mdx b/src/content/blogs/virtio-work.mdx
new file mode 100644
index 0000000..e2f787b
--- /dev/null
+++ b/src/content/blogs/virtio-work.mdx
@@ -0,0 +1,183 @@
+---
+author: alex-bennee
+published: true
+title: Working on VirtIO
+description: >
+  In this article, Alex Bennee provides a summary of the history of VirtIO and
+  the areas Linaro is working on for the future. Read more here!
+date: 2020-05-20T16:00:00.000Z
+image: linaro-website/images/blog/tech_background_1
+tags:
+  - qemu
+related_projects:
+  - STR
+related: []
+
+---
+
+![virtio diagram](/linaro-website/images/blog/virtio)
+
+# Introduction
+
+In the world of virtualisation you can present whatever hardware you like to the guest. From the guest's point of view it thinks it is running on real hardware, right down to the clunky serial ports and legacy interfaces. However, providing this sort of fidelity comes at a cost to the system as a whole. Each time the guest accesses something that isn't really there, for example a memory mapped IO address (MMIO), the access is "trapped" by the hypervisor.
The system will then context switch to the hypervisor, which will either handle the emulation of the device or pass the event deeper into the virtualisation stack, with each additional context switch adding to the total time taken to process the request. In real systems accessing an MMIO register may well be slower than accessing RAM, but in virtual systems it can involve executing tens of thousands of instructions before control is returned to the guest.
+
+# Para-virtualisation
+
+The concept of para-virtualisation is not a new one. A para-virtualised guest is simply one that has been specifically built to run in a virtual machine. It will then use a para-virtualised interface to more efficiently access resources instead of having to emulate an existing piece of real hardware. The [Xen](https://xenproject.org/) hypervisor was an early example in the Linux world which required a specially built Linux kernel image that would directly invoke a hypercall rather than force the hypervisor to expensively emulate a processor feature. Part of that para-virtualisation included I/O drivers split into a front-end (in the guest kernel) and a back-end (handled by the hypervisor).
+
+# Enter Virt-IO
+
+As Xen was slowly working towards up-streaming their guest support into the Linux kernel there was other virtualisation activity going on. In 2006 the KVM project came out and was quickly merged into the kernel. Ozlabs' Rusty Russell, who had written his own [lguest](http://lguest.ozlabs.org/) virtualisation module, was worried that with the proliferation of support for various hypervisors there was a danger of ending up with multiple competing solutions for para-virtualisation of I/O operations. His [original virtio paper](https://ozlabs.org/~rusty/virtio-spec/virtio-paper.pdf) proposed 3 core concepts:
+
+## Common configuration model
+
+All Virt-IO drivers have a common driver API to deal with things like configuration and feature negotiation. This makes them fairly simple to understand as they are not having to deal with the vagaries of real hardware. Drivers for real hardware often target a family of chips but have to take care of each individual chip's own quirks and workarounds.
+
+## Virtqueue transport abstraction
+
+Virtqueues provide the mechanism by which buffers are passed between the guest and the host. Guests will add buffers to the queue before "kicking" the host to notify it of the waiting buffers. The host in turn can queue buffers and trigger a callback in the guest, which functions much the same way as interrupts do for real hardware.
+
+## Replaceable transport implementation
+
+The original paper suggested a simple ring buffer implementation but acknowledged that there was scope for other transports to be used. By separating the low level transport from the driver abstraction any particular hypervisor requirements can be kept in one place without having to address them in the drivers themselves.
+
+# Standardisation
+
+By 2014 VirtIO had [seen fairly wide adoption](https://lwn.net/Articles/580186/), not only in the Linux kernel but also in other hosts such as VirtualBox and FreeBSD's [bhyve](https://wiki.freebsd.org/bhyve) hypervisor. In 2012 Rusty had been approached by ARM who wanted to clear any potential IP issues before using virtio in their [Fast Models](https://developer.arm.com/tools-and-software/simulation-models/fast-models).
He realised that having the specification scattered over blog +posts and patches wasn't sustainable and it was time for a formally +published standard where any potential IP issues could be made clear. +That work was turned over to the [Organization for the Advancement of +Structured Information Standards](https://www.oasis-open.org/) who are a non-profit group concerned +with developing open standards and interoperability. Work on the +VirtIO standard is now done in the open following the OASIS guidelines +and as of now has reached the [v1.1 of the spec](https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html). + +# Collaborating for the Future + +It should be clear now that VirtIO is well established as good model +for virtualised hardware - indeed you can now get real hardware which +implements the [VirtIO programming model](https://kvmforum2019.sched.com/event/TmxF/virtio-without-the-virt-towards-implementations-in-hardware-michael-tsirkin-red-hat). It has attracted the interest +of a number of our members and as Linaro is a place for collaboration +we are well positioned to get involved in the furthering of this open +standard. There are a number of areas of particular interest we are +currently looking at. + +## Enabling VirtIO on new hypervisors + +When virtualisation was first introduced to the ARM architecture it +was envisioned that most ARM hypervisors would be "type-1" or +"bare-metal" hypervisors. In this model the hypervisor is a very +lightweight layer at the highest privilege level which then may +offload more complex device emulation to lower privileged domains. In +Xen this is often Linux running in "dom0". While KVM's "type-2" +approach is well established in the server space there are still good +use cases for the traditional "type-1" approach. These include places +like automotive who want to ensure reliable partitioning of resources +and the mobile space where we are seeing the introduction of [secure +virtualisation](https://developer.arm.com/architectures/learn-the-architecture/armv8-a-virtualization/secure-virtualization). + +To support VirtIO on hypervisors such as Xen, Google's [Hafnium](https://opensource.google/projects/hafnium) and +[Project ACRN](https://projectacrn.github.io/latest/introduction/) will require adding support for new transport layers for +carrying signalling from the guest driver to the backend as well as +the ability to share parts of the guests memory with whatever might be +providing the backend of the device. Generally thanks to VirtIO's +layered approach the front ends are entirely untouched. + +## Expanding vhost-user + +While the front-end of a particular piece of virtual hardware always +looks the same to the guest there are a number of ways the host can +deal with the data. The traditional approach relies on the hypervisor +or Virtual Machine Manager (VMM) terminating the VirtIO transaction +before queuing the data through the host devices. A more optimal +approach involves the guest injecting data directly into the host +kernels outgoing data streams - this is known as vhost. Finally all +VirtIO handling can be handed off to a separate user space process and +dealt with there, referred to as vhost-user. + +The original driver for vhost-user was [to support high throughput +dataplanes](https://www.redhat.com/en/blog/how-vhost-user-came-being-virtio-networking-and-dpdk) such as [DPDK](https://www.dpdk.org/) which would busy-wait on queues rather than +have the overhead of interrupt based signalling. 
vhost-user is simply +a [protocol describing the messages](https://qemu.readthedocs.io/en/latest/interop/vhost-user.html) sent to the user space daemon to +signal that virtqueues need processing. Traditionally these drivers +have visibility of the entire guest address space to access the +virtqueues. We would like to support limiting this to +only the portions of the guest's address space required to do its job. +This in turn would allow interesting architectures where the back-ends +could be separated out into their own contained virtual domains. + +## Standardisation of more devices + +While the original driver for VirtIO was to implement efficient +high-performance devices, the simplicity of the programming model makes +it appealing as a form of Hardware Abstraction Layer (HAL). Having a +common HAL enables the individual implementation details to be kept to +a small part of the code base. For example, one model could see the +main mobile OS in a virtual machine communicating with virtual +devices. The details and individual quirks would be handled by a +smaller self-contained set of components for each platform. This would +simplify the process of validating and releasing new builds of the +main OS, which could help extend the software support lifetime of a +device. + +Devices of interest include things like the Replay Protected Memory Block +(RPMB), which provides secure storage services on phones. There is also +interest in having standardised sound and video devices for use-cases +like in-car entertainment systems. A more knotty problem involves how +to manage the power state of devices on things like mobile phone +platforms. Unlike servers, phones are constantly trying to manage the +power budgets for devices against the current demand. This involves +making decisions about how much voltage a part is supplied with or +what rate its clock is run at. We want to explore how these sorts of +requirements are best dealt with in a virtualised environment. + +As Linaro is an "upstream first" organisation we do our work in the +open on public mailing lists and repositories. If you are interested +please [do get in touch](https://www.linaro.org/contact/). diff --git a/src/content/blogs/what-is-linaro.mdx b/src/content/blogs/what-is-linaro.mdx new file mode 100644 index 0000000..71c3737 --- /dev/null +++ b/src/content/blogs/what-is-linaro.mdx @@ -0,0 +1,27 @@ +--- +title: What is Linaro? +description: The question “what is Linaro?” usually comes shortly after “what + company do you work for?” I have a variety of answers honed over the years. + Read them here! +date: 2019-09-13T00:15:44.000Z +image: linaro-website/images/blog/davidblogsep2019 +tags: + - arm + - linux-kernel + - open-source +author: david-rusling +related: [] + +--- + +The question “what is Linaro?” usually comes shortly after “what company do you work for?” I have a variety of answers honed over the years. I usually ask the Arm question, that is, “Do you know Arm?”. Time was this got blank looks but, lately, most people know about Arm, or at least the Arm processor in their mobile phones. If not, I tell them about Arm’s IP model and how they license technology and how their partners bundle that technology with their own to build devices. Some know, but the majority don’t, that their mobile has a Qualcomm chip in it or that that chip has Arm IP in it, in the form of the CPU architecture, fabric and peripherals.
What most do not know is that Arm is everywhere from your car, your washing machine, your camera, your router, your printer and so on. You get the drift. Ubiquitous. + +With hardware out of the way, I can talk about software. Again, this is much easier than it used to be. People know about Android so it’s easier to answer and talk about open source and how it works. So far, all I’ve managed to do by this point in the conversation is, if I’m lucky, to describe Arm’s Intellectual Property (IP) model and the rise and rise of open source. I might also mention that the biggest part of my career has been at the mixing point of Arm IP and open source; a combination of strong evolutionary forces that have, literally, revolutionized all our lives. Where would social media be without mobile phones? + +What I haven’t managed to do so far is to describe what Linaro is and what it does, but I’m getting there, albeit slowly. The thing is, there’s an awful lot of software that needs to be worked on in order to support hardware in a given industry ecosystem. Millions of lines, I say. Even with fierce competitors (I once likened the Arm partnership to a bunch of pirates), there is a need to collaborate on common software and frameworks. The collaboration is done by a mixture of Linaro engineers (paid for by our membership fees) and engineers from the members. In Linaro’s early days, it was more a question of tidying up the upstream code bases so that the various Arm based SoCs could benefit from features, bug fixes and security updates. This ‘work upstream’ mantra is still true but is now widely accepted in the Arm ecosystem. It’s just how software is developed. Most of Linaro’s efforts these days are in opening up various industry ecosystems to the Arm partnership. + +What do I mean by ‘ecosystem’? As an example, Android is an ecosystem. These ecosystems are represented in Linaro by Segment Groups. Linaro currently has four – the Consumer Group (which represents all things Android), the Datacenter & Cloud Group, the Edge & Fog Computing Group and the IoT & Embedded Group. We are in the process of establishing two more, one focused on Artificial Intelligence and the other Automotive. Each Linaro group is supported by members and representatives from that ecosystem. One of the first things the committee establishes is the scope of the group, just what it is that the group is trying to achieve. Each group also decides what software is important to the Arm ecosystem and what software frameworks need to exist in order to properly support the wealth of Arm based hardware. A good example of this is the work that is happening in artificial intelligence as the newly formed group works to establish frameworks allowing many different types of hardware acceleration from FPGAs to custom Neural Network acceleration hardware. + +Today, Linaro is where the Arm partnership collaborates strategically on ecosystems that are important to it, and engineers solutions that benefit Linaro’s membership in particular, and the Arm ecosystem in general. Assuming that my mythical interlocutor is still there, he or she might suggest that Linaro is like the Linux Foundation. My answer is ‘not really’. The Linux Foundation hosts open source projects, allowing companies to help set up traditional open source maintainerships and so on. This is valuable, but Linaro is actually about coordinated, collaborative engineering across a range of projects.
Whilst what we do is good for the software projects that we get involved with, ultimately it also benefits our members as they release products. + +Interested in working with the experts at Linaro? Check out [our Careers](https://www.linaro.org/careers/) page to find what positions we have available. diff --git a/src/content/blogs/what-to-expect-from-linaro-at-the-embedded-linux-conference-europe-2019.mdx b/src/content/blogs/what-to-expect-from-linaro-at-the-embedded-linux-conference-europe-2019.mdx new file mode 100644 index 0000000..f0a9a11 --- /dev/null +++ b/src/content/blogs/what-to-expect-from-linaro-at-the-embedded-linux-conference-europe-2019.mdx @@ -0,0 +1,50 @@ +--- +title: What to expect from Linaro at the Embedded Linux Conference (Europe) 2019 +date: 2019-10-22T09:48:58.000Z +image: linaro-website/images/blog/40965990761_090a30658a_k +tags: + - linux-kernel + - iot-embedded +author: bill-fletcher +related: [] + +--- + +Linaro will be exhibiting at ELC-E in Lyon next week, showing engineering solutions +for the Arm Ecosystem and how we work with members to consolidate codebases in the ecosystem as a whole and in specific market segments. + +**On the Linaro stand we’ll be talking about:** + +* the ecosystem and community projects that we host and/or contribute to - including Linux, toolchains, boot and security +* LAVA, our continuous integration system for deploying operating systems on to physical and virtual hardware for running tests; designed for validation during development +* Leveraging Linaro’s expertise on a custom project through Linaro Developer Services - how Linaro can help your company build, deploy, secure and maintain your code, including CI and automated validation + +**Our demonstrations:** + +* The Linaro open source hardware and software ecosystem around the STMicroelectronics STM32MP1 +* LAVA which powers the Linux Kernel Functional Test (LKFT) project + +![STM32MP1 community hardware demonstrating OpenAMP and Zephyr](/linaro-website/images/blog/openamp-demo) + +![Latest Developments in Linaro’s LAVA CI Infrastructure Project](/linaro-website/images/blog/lava-demo) + +You can find us, our members and projects on the following stands: + +**Linaro** - Stand B24 + +**STMicroelectronics** - Stand P1 + +**Arm** - Stand S3 + +**Red Hat** - Stand G3 + +**Trusted Firmware Linaro Community Project** - Stand B2 + +In addition to our booth presence, Andrea Gallo, VP of Membership Development from Linaro will also be presenting a session on AI/ML Deployment at the Edge on Monday 28 October at 16.20. Click [here](https://osseu19.sched.com/event/TLKj?iframe=no) to read the complete abstract. + +Even if you can't make it to ELC-E: + +* Check out latest developments in our on-going open source projects at [linaro.org](/) +* [Contact Linaro’s Developer Services](/services/) even if you’re a non-Member to leverage Linaro’s expertise + +*** diff --git a/src/content/blogs/windows-on-arm-and-the-possibilities-of-native-development.mdx b/src/content/blogs/windows-on-arm-and-the-possibilities-of-native-development.mdx new file mode 100644 index 0000000..0909a73 --- /dev/null +++ b/src/content/blogs/windows-on-arm-and-the-possibilities-of-native-development.mdx @@ -0,0 +1,46 @@ +--- +title: Native Support Gaps Within Windows on Arm +description: This blog talks about Windows on Arm, highlighting what is possible + & why porting natively is the way to go. Read more here. 
+date: 2021-10-12T01:05:49.000Z +image: linaro-website/images/blog/llvm-image +tags: + - windows-on-arm + - arm + - open-source +author: ebba-simpson +related: [] + +--- + +Linaro is working with Arm and Qualcomm to bring together other participants in the Arm ecosystem, identify gaps in the native support of key open source tools for Windows on Arm and set up the required upstream CI to produce the official binaries in collaboration with the upstream project maintainers. The aim is to create an open source community around Windows which supports third parties and ensures a good user experience. + +In April 2021, [Linaro announced the availability of the Windows 10 on Arm support and binary as part of the LLVM 12.0.0 release](https://www.linaro.org/news/linaro-arm-and-qualcomm-collaborate-to-enable-native-llvm-for-windows-10-on-arm/). This was the first LLVM release for Windows 10 on Arm and marked a significant step towards enabling developers to build natively with LLVM on Windows 10 on Arm. LLVM is one of the main tools the open-source community uses to compile their code. + +A few weeks ago, we hosted a panel at Linaro Virtual Connect Fall 2021 featuring Arm, Qualcomm and Microsoft where we discussed the current status of Windows on Arm and what work is left to do. This blog is based on public statements captured during the panel discussion on Windows on Arm, highlighting what is possible today and why porting natively is the way to go. [Click here to watch the complete discussion](https://resources.linaro.org/en/resource/jd7koxXDVxABYdZxK5h3xF). + +## The promise of Arm hardware - all day battery life and fast performance + +Arm has proven it is strong on power consumption, fast performance and battery life, making it a good choice for developing new device form factors. At present there are plenty of tools out there that allow you to emulate, meaning all the third party experiences delivered by Windows running on the Arm chip will just work. + +Your x86 application will work on Windows on Arm using Microsoft’s emulation platform built into the operating system. On Windows 11, x64 emulates on the device. There is even a new hybrid called Arm64 EC which allows you to bring together emulated assemblies and native assemblies into the same process to ease the steps of migrating. + +However, to truly leverage all day battery life and the highest level of performance, we need to move away from emulation and move towards porting natively. Porting natively not only improves overall performance and battery life but also reduces costs and uses fewer instructions. + +This is why Linaro has launched the Windows on Arm project, to work with the Arm ecosystem on creating the tools needed to natively port. + +## Windows on Arm today and how to get started developing + +A lot has happened since Windows on Snapdragon was first introduced in 2017. + +From the toolchain perspective there are several options to choose from, including the Windows 10 on Arm support and binary that was part of the LLVM 12.0.0 release ([and can be accessed here](https://www.linaro.org/downloads/#gnu_and_llvm)). + +When it comes to third party applications there are multiple options for emulation, using Microsoft’s emulation platform or Arm64 EC which is available with Windows 11. Arm64 EC allows you to run third party applications in emulation while your core components run native. 
Arm continues to work on upstreaming support and has so far upstreamed Electron and CEF support, as well as contributed a native build of Chromium. + +As for testing, there are plenty of devices to choose from - both the standard commercial Surface Pro X as well as [Qualcomm’s Snapdragon developer kit](https://developer.qualcomm.com/hardware/windows-on-snapdragon/snapdragon-developer-kit) are good options. You can read more about how to get started with Windows on Snapdragon [in this blog post](https://developer.qualcomm.com/blog/windows-snapdragon-developer-highlights) by Rami Husseini, Director of Product Management for Qualcomm Technologies, Inc. + +## How do I get involved? + +To get started there are plenty of resources on [developer.arm.com](https://developer.arm.com/) such as case studies which talk about moving to a native Windows application. Qualcomm and Microsoft Azure have also partnered to provide support to developers. So grab a device, start porting and get involved in the community. Compile it, see what dependencies you have and let us know what needs doing! + +For more information on the project and how to get involved, [go to the Windows on Arm project homepage here](https://linaro.atlassian.net/wiki/spaces/WOAR/overview). To find out more about Linaro and the work we do, [contact us here](https://www.linaro.org/contact/). diff --git a/src/content/blogs/windows-on-arm-now-supported-in-python-3-11-release.mdx b/src/content/blogs/windows-on-arm-now-supported-in-python-3-11-release.mdx new file mode 100644 index 0000000..c2a98fb --- /dev/null +++ b/src/content/blogs/windows-on-arm-now-supported-in-python-3-11-release.mdx @@ -0,0 +1,143 @@ +--- +title: "Windows on Arm now supported in Python 3.11 Release " +description: "In this blog we talk about the Python 3.11 release which supports + Windows on Arm. " +date: 2022-10-20T09:26:27.000Z +image: linaro-website/images/blog/blog_python_woa +tags: + - windows-on-arm +author: pierrick-bouvier +related: [] + +--- + +# Introduction + +If developers are exploring Windows on Arm and native development, they will be pleased to hear that Python, the most widely-used programming language, now has native support for Arm platforms. Starting with python 3.11, [an official installer](https://www.google.com/url?q=https://www.python.org/ftp/python/3.11.0/python-3.11.0-arm64.exe\&sa=D\&source=docs\&ust=1666699288024817\&usg=AOvVaw00yiqc79eiBrZfTL2ocsj5) for Windows on Arm is now available. It’s a great time for developers to start their journey targeting Windows on Arm. + +Python has been very successful thanks to its rich ecosystem, and especially pip packages ([https://pypi.org/](https://pypi.org/)). Some packages use native code (mostly C/C++), which usually involves some effort to enable a new platform. + +In recent months, Linaro has been contributing to several key packages, and provided a CI machine to [build and test](https://buildbot.python.org/all/#/builders/729) for Windows on Arm. In this blog we talk about the work involved in making the official installer for Windows on Arm available and how you can get set up. + +# CPython + +[CPython](https://www.google.com/url?q=https://github.com/python/cpython/\&sa=D\&source=docs\&ust=1666699350663733\&usg=AOvVaw3qdlM8XIn4LjZTn45HBmt3) is the official implementation of Python and its standard library. It’s written in C and Python.
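+
+Once the official installer mentioned above is in place, a quick way to confirm that the interpreter you are running is the native Arm build (rather than an x64 build under emulation) is to query the standard `platform` module. This is a small illustrative sketch rather than something from the upstream documentation; on a native win-arm64 build we would expect `platform.machine()` to report `ARM64`:
+
+```python
+# check_native.py - tiny sanity check for a native Windows on Arm interpreter (illustrative)
+import platform
+import sys
+
+print(sys.version)              # the build string should mention ARM64 on a native interpreter
+print(platform.machine())       # expected "ARM64" natively, "AMD64" under x64 emulation
+print(platform.architecture())  # e.g. ("64bit", "WindowsPE")
+```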
+ +Starting with version 3.8, experimental support for Windows on Arm was [added](https://bugs.python.org/issue36941), and it was possible to download it using [Nuget](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28657680903/Pre-release+Python+Installation#Download-from-NuGet) or [build it from source](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28657680903/Pre-release+Python+Installation#Compile-from-source) directly. + +With the release of Python 3.11, Windows on Arm is now listed as a supported platform ([Tier 3](https://peps.python.org/pep-0011/#tier-3)), and an installer is now available [here](https://www.google.com/url?q=https://www.python.org/ftp/python/3.11.0/python-3.11.0-arm64.exe\&sa=D\&source=docs\&ust=1666699288024817\&usg=AOvVaw00yiqc79eiBrZfTL2ocsj5). Great time for Pythonistas using a WoA machine 🐍🥳🐍! Note: this support is still experimental, and some bugs may be present. + +Python uses Buildbot for its [CI](https://buildbot.python.org/all/#/). Thanks to Linaro’s lab, we were able to provide a Surface Pro X to run a worker, which now officially supports [this](https://buildbot.python.org/all/#/builders/729) platform. + +# User’s perspective + +## Big picture + +Most of the software used in the python community is available through pip. + +Projects can be packaged in *wheels* that can contain compiled code, making them easy and fast for users to install. A wheel can be platform agnostic (if the package is pure python), or platform specific. + +When installing a package using **pip install mypackage**, pip automatically installs the package and its dependencies for you. It will try, in this order, to: + +* Find a platform agnostic wheel +* Find a platform specific wheel +* Build from source code + +## Build environment + +To be able to install a package using C/C++ code, you’ll need to install *[Visual Studio Community](https://visualstudio.microsoft.com/fr/vs/community/)* (available for free). You just need to select the “Desktop development with C++” workload, and everything should be installed correctly. From there, pip will be able to compile packages that do not offer wheels. + +Some python packages require additional tools and libraries for compilation. A few examples are CMake, LLVM, Rust, etc. Having them installed in your environment will help pip to install packages without any additional manual steps. You can refer to [Linaro's wiki page](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28594241585/Package+building+tutorial) that lists instructions and additional requirements for those. + +# Maintainer’s perspective + +When porting to a new platform, you have to choose between a native and a cross compilation workflow. That’s what we present in this section. More detailed information is available on [our wiki](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28598239406/Python). + +If your library is a pure python one, you’re good to go. If you depend on C/C++ code, keep on reading. + +## When porting is needed + +If your package is a pure python one, there is nothing to do. Pip can create and use "none" packages that don’t target a specific architecture, and they can be created from any machine. If that’s the case, you can skip this whole section. + +However, if you are using native code, through C/C++ for instance, you’ll have to make it work for Windows on Arm. Keep on reading to discover possible solutions. + +## Going native + +The good news is that everything is already handled in Python.
It will correctly compile your package, without any further work needed. What’s more, working natively allows you to run tests. However, accessing a Windows on Arm machine might be the tough part. As of today, there is no "free" solution for that; you’ll have to invest in some technology: + +### Physical machines + +* Any Windows on Arm laptop/tablet (for example, the [Microsoft Surface Pro X](https://www.microsoft.com/en-us/d/surface-pro-x/8xtmb6c575md?activetab=pivot%3aoverviewtab), Lenovo Thinkpad X13s, HP Elite Folio, Samsung Galaxy Book Go) +* Now available: [Microsoft Volterra](https://blogs.windows.com/windowsdeveloper/2022/05/24/create-next-generation-experiences-at-scale-with-windows/). A great devkit for developers. +* Raspberry Pi 4: Small but cheap and flexible to use. Here is [how to install Windows on it](https://www.tomshardware.com/how-to/install-windows-11-raspberry-pi). +* Apple M1: You can easily run a Windows on Arm virtual machine for free using [UTM](https://mac.getutm.app/). The hardware is pricey but might be worth it if you need to support that platform, too. + +### Cloud + +Since April 2022, it’s possible to run [Windows on Arm on Azure](https://azure.microsoft.com/en-us/blog/now-in-preview-azure-virtual-machines-with-ampere-altra-armbased-processors/). + +### CI/CD + +So far, there is no platform supporting Windows on Arm as a runner (in particular, it is lacking on GitHub and Azure DevOps). However, you can still set up your own machine and register it as a self-hosted runner for your favourite environment. We tried that for GitHub, Azure DevOps, and GitLab and it’s working well. + +### Cross compilation + +If you can’t have a dedicated machine, you can still try to cross compile your package. However, you’ll lose the ability to run tests. + +Cross compilation is controlled by environment variables that determine: + +* How to compile a single C/C++ file ([VSCMD\_ARG\_TGT\_ARCH](https://setuptools.pypa.io/en/latest/history.html#v57-2-0)) +* How to package libraries in a wheel file ([SETUPTOOLS\_EXT\_SUFFIX](https://setuptools.pypa.io/en/latest/history.html#v57-4-0)) + +Both are now handled by [setuptools](https://github.com/pypa/setuptools) (>=57.4.0). In short, python distutils is now deprecated (with plans to remove it in python 3.12) and will be replaced by setuptools (details [here](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28593914166/setuptools-distutils)). + +To cross compile, you have to set up this environment: + +``` +# ensure minimal version of setuptools is available +python -m pip install "setuptools>=57.4.0" +# use distutils coming from setuptools (will be default in python 3.12) +set SETUPTOOLS_USE_DISTUTILS=local +# controls how to compile one file +set VSCMD_ARG_TGT_ARCH=arm64 +# how to name library file in wheel (set cp3XX for your python version) +set SETUPTOOLS_EXT_SUFFIX=.cp311-win_arm64.pyd +``` + +From there, you can create a wheel by using **pip wheel**, as usual. More [tips](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28700082189/Tips+and+tricks+for+porting+to+win-arm64) and [build instructions per package](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28594241585/Package+building+tutorial) are provided on [our wiki](https://linaro.atlassian.net/wiki/spaces/WOAR/overview). + +#### Limitations + +As seen above, your project should be using setuptools (with a recent enough version). + +Alas, some projects are blocked by this: notably [numpy](https://github.com/numpy/numpy), which is now working to move away from the old distutils package.
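+
+To make the setuptools requirement concrete, here is a minimal sketch of the kind of project the cross compilation flow above expects: a package whose native code is declared through `setuptools.Extension`. The project name and the `hello.c` source are purely illustrative, not taken from any of the packages discussed here:
+
+```python
+# setup.py - minimal native-extension package built through setuptools (illustrative)
+from setuptools import Extension, setup
+
+setup(
+    name="hello-native",
+    version="0.1.0",
+    # "hello.c" is a hypothetical C source implementing the extension module
+    ext_modules=[Extension("hello", sources=["hello.c"])],
+)
+```
+
+With the environment variables from the previous section set, running `pip wheel .` in such a project should produce a wheel tagged for win_arm64 instead of the build machine’s own architecture.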
+ +In general, python 3.12 support should be the last step to make sure all packages can be easily cross compiled, as it will be mandatory to use setuptools. + +#### cibuildwheel + +This is a convenient tool that allows you to create wheels for different architectures, automatically. [This PR](https://github.com/pypa/cibuildwheel/pull/1144) adds support for cross compiling to win-arm64, and should be merged soon. + +#### Examples + +Some projects that have started cross compiling for win-arm64: + +* [pywin32](https://github.com/mhammond/pywin32/blob/90c31cba7a3948b484e426a5673b0dbc61254f22/.github/workflows/main.yml#L64) +* [libclang](https://github.com/sighingnow/libclang/blob/8f64e5bc1a2bc9ddebf0cd262d096f5c262757dc/.github/workflows/libclang-windows-aarch64.yml) +* [maturin](https://github.com/PyO3/maturin/blob/main/.github/workflows/release.yml) + +## Work in Progress + +* cibuildwheel: PR to cross compile for win-arm64 +* Numpy: moving from distutils to meson, should enable cross compilation +* PyTorch: Linaro is working to enable the Windows on Arm platform (status) +* Anaconda and Conda-forge: we have been discussing with Anaconda and the conda-forge community how to enable the Windows on Arm platform. This is now in progress: stay tuned! + +## That’s all folks! + +This concludes our tour of Python for Windows on Arm. Even though the path to support all python packages is still long, there are plenty of packages already available. This effort will be continued, and python 3.12 should be the last step to democratise this, thanks to setuptools usage becoming mandatory. Considering Python is the most used programming language, a lot of developers will have the opportunity to program with Python on Windows on Arm natively. + +Microsoft and Linaro will continue to be involved in this enablement, and will keep helping communities enable Python packages for Windows on Arm. + +For more information on Linaro’s Windows on Arm Project and how to get involved, go to [our Project page](https://www.linaro.org/windows-on-arm/).
From 0886cfa4f42c57d6bcab46a09681ad0671752cfa Mon Sep 17 00:00:00 2001 From: Louis Date: Wed, 22 May 2024 15:20:44 +0100 Subject: [PATCH 2/5] fix migrated blogs from 2018-2023 --- src/content/authors/brian-pang.md | 6 ++++++ ...-aiot-development-platform-bootprint-x2-kit.mdx | 1 + ...ntroducing-linaro-virtual-connect-fall-2021.mdx | 5 +++-- src/content/blogs/can-we-make-ai-super.mdx | 1 + ...omputing-hpc-reflection-and-forward-looking.mdx | 2 +- ...tests-over-a-million-linux-kernels-per-year.mdx | 2 +- .../linaro-connect-budapest-2020-cancelled.mdx | 6 +++--- ...ys-a-livestream-event-of-technical-sessions.mdx | 4 ++-- ...-the-upcoming-linaro-connect-san-diego-2019.mdx | 5 ++--- .../qemu-8-2-and-linaro-s-maintainer-story.mdx | 14 +++++++------- ...e-tooling-now-a-reality-in-the-linux-kernel.mdx | 1 + ...-multiple-devices-with-the-same-aosp-images.mdx | 2 +- .../blogs/tuxpub-the-serverless-file-server.mdx | 3 ++- ...t-the-embedded-linux-conference-europe-2019.mdx | 4 ++-- 14 files changed, 33 insertions(+), 23 deletions(-) create mode 100644 src/content/authors/brian-pang.md diff --git a/src/content/authors/brian-pang.md b/src/content/authors/brian-pang.md new file mode 100644 index 0000000..306f8c6 --- /dev/null +++ b/src/content/authors/brian-pang.md @@ -0,0 +1,6 @@ +--- +name: Brian Pang +first_name: Brian +last_name: Pang +image: linaro-website/images/author/unknown +--- diff --git a/src/content/blogs/96boards-and-horizon-robotics-jointly-launch-aiot-development-platform-bootprint-x2-kit.mdx b/src/content/blogs/96boards-and-horizon-robotics-jointly-launch-aiot-development-platform-bootprint-x2-kit.mdx index 05bf39d..d3b5145 100644 --- a/src/content/blogs/96boards-and-horizon-robotics-jointly-launch-aiot-development-platform-bootprint-x2-kit.mdx +++ b/src/content/blogs/96boards-and-horizon-robotics-jointly-launch-aiot-development-platform-bootprint-x2-kit.mdx @@ -1,6 +1,7 @@ --- title: "Linaro and Horizon Robotics jointly launch AIoT development platform: BOOTPRINT X2 kit" +description: Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, and 96Boards Partner Horizon Robotics today announced the launch of the BOOTPRINT X2 kit. date: 2019-10-31T11:48:43.000Z image: linaro-website/images/blog/27094831048_6ecb96f52a_o tags: diff --git a/src/content/blogs/automotive-hyperscalers-testing-on-arm-and-more-introducing-linaro-virtual-connect-fall-2021.mdx b/src/content/blogs/automotive-hyperscalers-testing-on-arm-and-more-introducing-linaro-virtual-connect-fall-2021.mdx index 8c0c6a6..7d1bf38 100644 --- a/src/content/blogs/automotive-hyperscalers-testing-on-arm-and-more-introducing-linaro-virtual-connect-fall-2021.mdx +++ b/src/content/blogs/automotive-hyperscalers-testing-on-arm-and-more-introducing-linaro-virtual-connect-fall-2021.mdx @@ -8,8 +8,9 @@ date: 2021-08-18T12:57:31.000Z image: linaro-website/images/blog/48784720458_63040ac998_k strap_image: /assets/images/content/48784720458_63040ac998_k.jpg tags: - - linaro-connect -author: connect + - virtualization + - automotive +author: linaro related: [] --- diff --git a/src/content/blogs/can-we-make-ai-super.mdx b/src/content/blogs/can-we-make-ai-super.mdx index 83e3af3..ad35212 100644 --- a/src/content/blogs/can-we-make-ai-super.mdx +++ b/src/content/blogs/can-we-make-ai-super.mdx @@ -1,5 +1,6 @@ --- title: Can we make AI Super? +description: Linaro works with hardware vendors and software developers to help coordinate and build the toolkits for improved calculation libraries. 
We work to defragment the market by supporting ONNX, TFLite and TVM to translate to Arm NN supported inferencing optimised hardware. Linaro's HPC group aims to assist in optimising libraries and infrastructure dependencies that distribute the calculation requirements across servers, clusters, HPC nodes and supercomputers. But beyond Machine Learning and inferencing, where is the full scope of the truly cognitive AI? In this blog, Linaro's HPC Tech Lead Paul Isaacs talks about the history of AI and future opportunities made possible through supercomputing. date: 2019-11-20T10:24:06.000Z image: linaro-website/images/blog/abstract1 tags: diff --git a/src/content/blogs/high-performance-computing-hpc-reflection-and-forward-looking.mdx b/src/content/blogs/high-performance-computing-hpc-reflection-and-forward-looking.mdx index 7aca2c1..e9043bc 100644 --- a/src/content/blogs/high-performance-computing-hpc-reflection-and-forward-looking.mdx +++ b/src/content/blogs/high-performance-computing-hpc-reflection-and-forward-looking.mdx @@ -36,7 +36,7 @@ For example: [Fuse multiply and accumulate (saxpy](https://developer.arm.com/docs/ddi0596/e/simd-and-floating-point-instructions-alphabetic-order/fmlal-fmlal2-vector-floating-point-fused-multiply-add-long-to-accumulator-vector)) y = a*x + y;* -*Dot products a*{ij} = x\*{ik}y\_{kj} (in tensor notation) as required in [gemm (matrix-matrix) operations](https://developer.arm.com/architectures/instruction-sets/simd-isas/neon/neon-programmers-guide-for-armv8-a/optimizing-c-code-with-neon-intrinsics/optimizing-matrix-multiplication). +*Dot products `a_{ij} = x_{ik} y_{kj}` (in tensor notation) as required in [gemm (matrix-matrix) operations](https://developer.arm.com/architectures/instruction-sets/simd-isas/neon/neon-programmers-guide-for-armv8-a/optimizing-c-code-with-neon-intrinsics/optimizing-matrix-multiplication). *“Typically, whole SIMD operations form the inner-most of loops and the registers are assigned to light weight threads, say Open MP, on the next outer layer. A further coarse grained parallelism is then supplied by the outer ‘administrative’ loop layers such as Open MPI which typically allocate heavier blocks, e.g. domain or logical decomposition of work to packages.” (quote: Roger Philp, Linaro HPC Senior Engineer)* diff --git a/src/content/blogs/how-linaro-builds-boots-and-tests-over-a-million-linux-kernels-per-year.mdx b/src/content/blogs/how-linaro-builds-boots-and-tests-over-a-million-linux-kernels-per-year.mdx index 6666dfa..34fdf40 100644 --- a/src/content/blogs/how-linaro-builds-boots-and-tests-over-a-million-linux-kernels-per-year.mdx +++ b/src/content/blogs/how-linaro-builds-boots-and-tests-over-a-million-linux-kernels-per-year.mdx @@ -13,7 +13,7 @@ tags: - linux-kernel - testing - ci -author: ben-copeland +author: benjamin-copeland related: [] --- diff --git a/src/content/blogs/linaro-connect-budapest-2020-cancelled.mdx b/src/content/blogs/linaro-connect-budapest-2020-cancelled.mdx index 8dda71e..8c95406 100644 --- a/src/content/blogs/linaro-connect-budapest-2020-cancelled.mdx +++ b/src/content/blogs/linaro-connect-budapest-2020-cancelled.mdx @@ -1,10 +1,10 @@ --- title: Linaro Connect Budapest 2020 cancelled date: 2020-02-20T08:53:19.000Z +
With that in mind, it is with great regret that we have decided to cancel the upcoming Linaro Connect which was due to be held on 23-27 March 2020 at the Corinthia Hotel in Budapest, Hungary. image: linaro-website/images/blog/48784720458_63040ac998_k -tags: - - linaro-connect -author: connect +tags: [] +author: linaro related: [] --- diff --git a/src/content/blogs/linaro-tech-days-a-livestream-event-of-technical-sessions.mdx b/src/content/blogs/linaro-tech-days-a-livestream-event-of-technical-sessions.mdx index 64c8cb6..5175405 100644 --- a/src/content/blogs/linaro-tech-days-a-livestream-event-of-technical-sessions.mdx +++ b/src/content/blogs/linaro-tech-days-a-livestream-event-of-technical-sessions.mdx @@ -1,12 +1,12 @@ --- title: "Linaro Tech Days: A livestream event of technical sessions" date: 2020-03-13T07:08:16.000Z +description: Linaro Tech Days are a series of technical sessions that will be presented live online for anyone to join. Recordings and slides will be made available on [our Resources page](https://resources.linaro.org/) for those who are not able to join. image: linaro-website/images/blog/30921188158_953bca1c9f_k tags: - - linaro-connect - arm - open-source -author: connect +author: linaro related: [] --- diff --git a/src/content/blogs/microsoft-to-talk-iot-security-with-azure-sphere-at-the-upcoming-linaro-connect-san-diego-2019.mdx b/src/content/blogs/microsoft-to-talk-iot-security-with-azure-sphere-at-the-upcoming-linaro-connect-san-diego-2019.mdx index 8f3d714..c59d9ce 100644 --- a/src/content/blogs/microsoft-to-talk-iot-security-with-azure-sphere-at-the-upcoming-linaro-connect-san-diego-2019.mdx +++ b/src/content/blogs/microsoft-to-talk-iot-security-with-azure-sphere-at-the-upcoming-linaro-connect-san-diego-2019.mdx @@ -1,10 +1,9 @@ --- -title: Microsoft to talk IoT security with Azure Sphere at the upcoming Linaro - Connect San Diego 2019 +title: Microsoft to talk IoT security with Azure Sphere at the upcoming Linaro Connect San Diego 2019 date: 2019-09-10T08:22:35.000Z +description: icrosoft to talk IoT security with Azure Sphere at the upcoming Linaro Connect San Diego 2019 image: linaro-website/images/blog/microsoft-sphere-2 tags: - - linaro-connect - linux-kernel - security - iot-embedded diff --git a/src/content/blogs/qemu-8-2-and-linaro-s-maintainer-story.mdx b/src/content/blogs/qemu-8-2-and-linaro-s-maintainer-story.mdx index 1110ff2..d2442db 100644 --- a/src/content/blogs/qemu-8-2-and-linaro-s-maintainer-story.mdx +++ b/src/content/blogs/qemu-8-2-and-linaro-s-maintainer-story.mdx @@ -28,12 +28,12 @@ The year before I joined Linaro we were well represented in the contribution sta ### Top changeset contributors by employer (Dec 2013 to Dec 2014) -
   Red Hat
   2283 (39.3%)
In the years since I joined we have continued to invest in the project as it has become a key part of delivering for Linaro and its members. About a fifth of the [MAINTAINERS](https://gitlab.com/qemu-project/qemu/-/blob/master/MAINTAINERS?ref_type=heads) entries on the project are now Linaro email addresses covering areas such as the core TCG translator, testing frameworks, debug and introspection code as well as of course the Arm emulation support and a slew of the modelled devices. @@ -88,7 +88,7 @@ In the years since I joined we have continued to invest in the project as it has ### Top changeset contributors by employer (Nov 2022 to Nov 2023) -
@@ -144,7 +144,7 @@ In the years since I joined we have continued to invest in the project as it has
# What’s new in 8.2 @@ -176,13 +176,13 @@ Support for QEMU and RMM are still being staged into upstream Trusted Firmware, A [set of binaries](https://fileserver.linaro.org/s/Grjs6kSkBYd8DkX) has been created using the above instructions as a demonstration.  Unpack the tar file and execute “rmm-example/run-host.sh”: -![Picture of QEMU running with tabs for the host and realm consoles as well as the secure and non-secure serial ports.](/linaro-website/images/blog/rmm-example-run-host.sh) +![Picture of QEMU running with tabs for the host and realm consoles as well as the secure and non-secure serial ports.](/linaro-website/images/blog/rmm-example-run-host-sh) The firmware and the kernel’s earlycon will log to serial0, the secure monitor will log to serial1, and the host and realm guests consoles will be on HostConsole and RealmConsole respectively. Log into the host as “root” with no password, then execute “/mnt/run-guest.sh”.  The debugging that is enabled within the firmware will immediately begin logging about the realm creation: -![Picture of QEMU continuing to run, this time with the serial port outputting diagnostics from the Realm Machine Manager (RMM)](/linaro-website/images/blog/mnt-run-guest.sh) +![Picture of QEMU continuing to run, this time with the serial port outputting diagnostics from the Realm Machine Manager (RMM)](/linaro-website/images/blog/mnt-run-guest-sh) Within a few minutes, the guest will boot: diff --git a/src/content/blogs/standard-temperature-tooling-now-a-reality-in-the-linux-kernel.mdx b/src/content/blogs/standard-temperature-tooling-now-a-reality-in-the-linux-kernel.mdx index d20c8f6..42542e8 100644 --- a/src/content/blogs/standard-temperature-tooling-now-a-reality-in-the-linux-kernel.mdx +++ b/src/content/blogs/standard-temperature-tooling-now-a-reality-in-the-linux-kernel.mdx @@ -1,5 +1,6 @@ --- title: Standard Temperature Tooling in the Linux kernel +description: With the Linux 5.6 release, it was announced that there is a proper drive temperature driver for disks and solid-state drives with temperature sensors - something that has been in the works for years. So what does this mean? Why is this significant? And how did Linaro play a role? image: linaro-website/images/blog/30921180788_34ce2cd5f8_c tags: - linux-kernel diff --git a/src/content/blogs/supporting-multiple-devices-with-the-same-aosp-images.mdx b/src/content/blogs/supporting-multiple-devices-with-the-same-aosp-images.mdx index 2878234..d32c765 100644 --- a/src/content/blogs/supporting-multiple-devices-with-the-same-aosp-images.mdx +++ b/src/content/blogs/supporting-multiple-devices-with-the-same-aosp-images.mdx @@ -41,7 +41,7 @@ The next issue we needed to solve was getting a single kernel that booted proper The major blocker we ran into was with the bootloader. Specifically, Qualcomm's ABL (edk2/uefi secondary bootloader), which is responsible for loading the kernel and platform specific Device Tree. The primary purpose of a Device Tree (or Device Tree Blob) in Linux is to provide a way to describe non-discoverable hardware ([more on Device Tree here](https://elinux.org/Device_Tree_Reference)). And even though RB5 and DB845c share a lot of common blocks, we are still talking about two different SoCs with enough hardware differences that are not discoverable at run time. So we have to depend on DTB to provide that platform specific information to ABL. -Now, ideally, the DTB is supposed to be kept and provided by the bootloader, as it is supposed to be tied to the hardware. 
If that were the case, this would be even easier. However, in practice that is not particularly common, as often DTBs are in flux while drivers are upstreamed, and thus the DTBs end up being managed together with the kernel. With AOSP, the boot image can provide a single DTB as dtb.img or a list of concatenated DTBs as dtb.img. So the first step towards a single AOSP boot image was to concatenate and pass DB845c and RB5 DTBs as the dtb.img, and let the ABL select and load the platform specific DTB from dtb.img. This DTB selection or matching is done based on DTB properties like qcom,{msm-id/board-id/pmic-id}, and since these properties were not supported on DB845c and RB5 initially, we put a hook in the ABL to pick the first and the only DTB it can find in dtb.img. +Now, ideally, the DTB is supposed to be kept and provided by the bootloader, as it is supposed to be tied to the hardware. If that were the case, this would be even easier. However, in practice that is not particularly common, as often DTBs are in flux while drivers are upstreamed, and thus the DTBs end up being managed together with the kernel. With AOSP, the boot image can provide a single DTB as dtb.img or a list of concatenated DTBs as dtb.img. So the first step towards a single AOSP boot image was to concatenate and pass DB845c and RB5 DTBs as the dtb.img, and let the ABL select and load the platform specific DTB from dtb.img. This DTB selection or matching is done based on DTB properties like `qcom`,`{msm-id/board-id/pmic-id}`, and since these properties were not supported on DB845c and RB5 initially, we put a hook in the ABL to pick the first and the only DTB it can find in dtb.img. # The end result diff --git a/src/content/blogs/tuxpub-the-serverless-file-server.mdx b/src/content/blogs/tuxpub-the-serverless-file-server.mdx index 9de874c..2167f5e 100644 --- a/src/content/blogs/tuxpub-the-serverless-file-server.mdx +++ b/src/content/blogs/tuxpub-the-serverless-file-server.mdx @@ -1,10 +1,11 @@ --- title: tuxpub - The Serverless File Server +description: Linaro is presently working on a SaaS offering called TuxBuild (and companion service called TuxBoot). These technologies are implemented using the new serverless model and have a need to provide artifacts from cloud storage using a lightweight application that provides a file browser as a web-based user front end. date: 2020-06-02T11:10:28.000Z image: linaro-website/images/blog/code_highway_small tags: - datacenter -author: ben-copeland +author: benjamin-copeland related: [] --- diff --git a/src/content/blogs/what-to-expect-from-linaro-at-the-embedded-linux-conference-europe-2019.mdx b/src/content/blogs/what-to-expect-from-linaro-at-the-embedded-linux-conference-europe-2019.mdx index f0a9a11..edb8ca6 100644 --- a/src/content/blogs/what-to-expect-from-linaro-at-the-embedded-linux-conference-europe-2019.mdx +++ b/src/content/blogs/what-to-expect-from-linaro-at-the-embedded-linux-conference-europe-2019.mdx @@ -1,5 +1,6 @@ --- title: What to expect from Linaro at the Embedded Linux Conference (Europe) 2019 +description: Linaro will be exhibiting at ELC-E in Lyon next week, showing engineering solutions for the Arm Ecosystem and how we work with members to consolidate codebases in the ecosystem as a whole and in specific market segments. 
date: 2019-10-22T09:48:58.000Z image: linaro-website/images/blog/40965990761_090a30658a_k tags: @@ -10,8 +11,7 @@ related: [] --- -Linaro will be exhibiting at ELC-E in Lyon next week, showing engineering solutions -for the Arm Ecosystem and how we work with members to consolidate codebases in the ecosystem as a whole and in specific market segments. +Linaro will be exhibiting at ELC-E in Lyon next week, showing engineering solutions for the Arm Ecosystem and how we work with members to consolidate codebases in the ecosystem as a whole and in specific market segments. **On the Linaro stand we’ll be talking about:** From f5182a18891773dbce96e351264bbaa8471519be Mon Sep 17 00:00:00 2001 From: Louis Date: Wed, 22 May 2024 16:40:26 +0100 Subject: [PATCH 3/5] WIP migrate wordpress 2010-2018 blogs --- src/components/article/ArticleContent.astro | 24 +- src/content/authors/peter-maydell.md | 6 + src/content/authors/shovan-sargunam,.md | 6 + src/content/authors/zoran-markovic.md | 6 + ...lerated-aes-for-the-arm64-linux-kernel.mdx | 112 ++++ ...ich-now-available-linaro-member-boards.mdx | 25 + ...er-joins-linaro-iot-and-embedded-group.mdx | 30 + ...m-service-to-android-5-tips-and-how-to.mdx | 559 ++++++++++++++++++ .../blogs/androidization-of-linux-kernel.mdx | 56 ++ src/content/blogs/aosp-on-64-bit.mdx | 66 +++ ...oards-launches-deci-core-armv8-product.mdx | 84 +++ ...eed-the-rollout-of-linux-based-devices.mdx | 75 +++ ...rate-linaro-connect-event-cambridge-uk.mdx | 42 ++ src/content/blogs/arm-trustzone-qemu.mdx | 176 ++++++ src/content/blogs/bitmain-joins-96boards.mdx | 42 ++ ...-device-tree-secure-firmware-bud17-313.mdx | 20 + ...nnect-europe-2013-lce13-dublin-ireland.mdx | 31 + ...ing-code-implementing-suspend-blockers.mdx | 118 ++++ ...coresight-perf-and-the-opencsd-library.mdx | 266 +++++++++ .../debugging-arm-kernels-using-nmifiq.mdx | 198 +++++++ ...-12-show-latest-linux-developments-arm.mdx | 27 + ...y-aware-scheduling-eas-progress-update.mdx | 394 ++++++++++++ ...pany-acadine-joins-linaro-mobile-group.mdx | 41 ++ .../fujitsu-semiconductor-joins-linaro.mdx | 30 + ...to-extend-collaboration-in-project-ara.mdx | 31 + .../google-becomes-club-member-linaro.mdx | 40 ++ .../hisilicon-joins-linaro-as-core-member.mdx | 32 + ...ins-linaro-96boards-steering-committee.mdx | 45 ++ ...lerate-advanced-server-development-arm.mdx | 35 ++ ...system-for-arm-servers-and-join-linaro.mdx | 129 ++++ ...te-collaboration-in-autonomous-driving.mdx | 61 ++ .../blogs/introducing-devicetree-org.mdx | 22 + .../blogs/is-linaro-a-distribution.mdx | 23 + ...kers-lined-up-for-linaro-connect-sfo15.mdx | 42 ++ .../blogs/kprobes-event-tracing-armv8.mdx | 318 ++++++++++ .../kvm-pciemsi-passthrough-armarm64.mdx | 322 ++++++++++ ...ports-full-range-arm-cortex-processors.mdx | 32 + src/content/blogs/lava-fundamentals.mdx | 59 ++ src/content/blogs/lava-master-images.mdx | 25 + ...collaborate-linaro-arm-linux-platforms.mdx | 91 +++ ...-first-oem-to-join-linaro-mobile-group.mdx | 35 ++ .../blogs/lg-electronics-joins-linaro.mdx | 50 ++ ...1-contributor-linux-kernel-4-9-release.mdx | 19 + ...o-14-04-release-now-available-download.mdx | 74 +++ ...16-04-release-available-for-download-2.mdx | 70 +++ .../blogs/linaro-and-distributions.mdx | 30 + ...ure-media-solutions-for-arm-based-socs.mdx | 54 ++ ...ve-accelerate-arm-software-development.mdx | 50 ++ ...g-member-linaro-community-boards-group.mdx | 44 ++ ...nounces-alibaba-group-as-latest-member.mdx | 37 ++ ...r-of-the-new-linaro-digital-home-group.mdx | 47 ++ 
...-announces-arm-based-developer-cloud-2.mdx | 54 ++ .../linaro-announces-broadcom-new-member.mdx | 33 ++ ...ant-96boards-iot-edition-specification.mdx | 207 +++++++ ...ant-96boards-tv-platform-specification.mdx | 40 ++ ...irst-lts-monarch-release-opendataplane.mdx | 42 ++ ...elerate-high-performance-computing-arm.mdx | 49 ++ ...emos-upcoming-linaro-connect-hong-kong.mdx | 64 ++ ...aro-connect-usa-2013-event-santa-clara.mdx | 32 + ...ynote-speakers-linaro-connect-usa-2014.mdx | 35 ++ ...test-96boards-product-aosp-development.mdx | 202 +++++++ ...nch-of-machine-intelligence-initiative.mdx | 37 ++ ...r-of-the-linaro-community-boards-group.mdx | 42 ++ ...ftware-engineering-internet-things-iot.mdx | 54 ++ ...g-member-linaro-community-boards-group.mdx | 41 ++ .../linaro-announces-mediatek-member.mdx | 39 ++ ...naro-announces-opendataplane-tigermoth.mdx | 153 +++++ ...est-industry-leader-to-become-a-member.mdx | 42 ++ ...oftware-reference-platform-arm-servers.mdx | 39 ++ ...trum-communications-latest-club-member.mdx | 40 ++ ...ces-support-for-96boards-hikey-in-aosp.mdx | 34 ++ ...inaro-announces-zte-latest-club-member.mdx | 35 ++ .../linaro-appoints-guy-berruyer-chairman.mdx | 33 ++ ...l-guo-to-lead-greater-china-operations.mdx | 35 ++ ...ppoints-mark-orvek-post-vp-engineering.mdx | 32 + src/content/blogs/linaro-at-elc-2011.mdx | 46 ++ .../linaro-board-minutes-september-2010.mdx | 14 + ...rce-test-platform-open-compute-project.mdx | 32 + .../linaro-ceo-george-grey-speak-elc-2013.mdx | 54 ++ ...buntu-introduction-new-partner-program.mdx | 63 ++ ...m-appliedmicros-arm-64c2adbit-hardware.mdx | 52 ++ ...pplications-release-opendataplane-v1-0.mdx | 85 +++ .../linaro-forms-security-working-group.mdx | 46 ++ ...s-accelerating-open-source-development.mdx | 76 +++ ...ins-the-industrial-internet-consortium.mdx | 28 + .../linaro-launches-96boards-ai-platform.mdx | 53 ++ ...d-build-portal-engineering-group-pages.mdx | 30 + ...-interoperability-networking-platforms.mdx | 87 +++ .../blogs/linaro-makes-snowball-shine.mdx | 22 + .../blogs/linaro-names-george-grey-ceo.mdx | 27 + ...linaro-participate-open-compute-summit.mdx | 28 + ...st-board-origen-open-source-developers.mdx | 27 + src/content/blogs/linaro-q1-2011-update.mdx | 14 + src/content/blogs/linaro-q3-update.mdx | 16 + ...speakers-linaro-connect-las-vegas-2016.mdx | 41 ++ src/content/blogs/meltdown-spectre.mdx | 211 +++++++ ...hoice-performance-and-power-efficiency.mdx | 122 ++++ ...-the-performance-of-arm-virtualization.mdx | 88 +++ ...p-tee-open-source-security-mass-market.mdx | 47 ++ .../blogs/opencsd-operation-use-library.mdx | 368 ++++++++++++ ...n-center-becomes-core-member-of-linaro.mdx | 32 + ...tinno-mobile-joins-linaro-mobile-group.mdx | 35 ++ .../blogs/running-64bit-android-l-qemu.mdx | 98 +++ ...-the-linaro-hpc-special-interest-group.mdx | 33 ++ ...bank-joins-96boards-steering-committee.mdx | 38 ++ .../blogs/software-leaders-advise-linaro.mdx | 32 + src/content/blogs/stm-and-its-usage.mdx | 484 +++++++++++++++ src/content/blogs/suspend-to-idle.mdx | 143 +++++ src/content/blogs/sve-in-qemu-linux-user.mdx | 228 +++++++ .../system-on-module-specifications~.mdx | 32 + ...ment-with-no-hardware-is-that-possible.mdx | 39 ++ ...esting-a-trusted-execution-environment.mdx | 117 ++++ src/content/blogs/the-need-for-linaro.mdx | 21 + ...ring-partner-steering-committee-member.mdx | 44 ++ ...r-IV-joins-96boards-steering-committee.mdx | 38 ++ .../blogs/tricks-for-debugging-qemu-rr.mdx | 48 ++ .../u-boot-on-arm32-aarch64-and-beyond.mdx | 86 +++ 
.../blogs/update-android-kernel-tools.mdx | 69 +++ ...os-community-building-future-linux-arm.mdx | 32 + .../view-linaro-10-11-release-webinar.mdx | 33 ++ .../watch-announcement-linaro-computex.mdx | 15 + src/content/blogs/whats-new-qemu-2-9.mdx | 63 ++ ...hen-will-uefi-and-acpi-be-ready-on-arm.mdx | 140 +++++ ...xilinx-joins-linaro-iot-embedded-group.mdx | 29 + src/content/config.ts | 2 +- src/layouts/BaseLayout.astro | 2 +- 126 files changed, 9263 insertions(+), 12 deletions(-) create mode 100644 src/content/authors/peter-maydell.md create mode 100644 src/content/authors/shovan-sargunam,.md create mode 100644 src/content/authors/zoran-markovic.md create mode 100644 src/content/blogs/accelerated-aes-for-the-arm64-linux-kernel.mdx create mode 100644 src/content/blogs/accelerated-builds-android-ice-cream-sandwich-now-available-linaro-member-boards.mdx create mode 100644 src/content/blogs/acer-joins-linaro-iot-and-embedded-group.mdx create mode 100644 src/content/blogs/adding-a-new-system-service-to-android-5-tips-and-how-to.mdx create mode 100644 src/content/blogs/androidization-of-linux-kernel.mdx create mode 100644 src/content/blogs/aosp-on-64-bit.mdx create mode 100644 src/content/blogs/archermind-joins-96boards-launches-deci-core-armv8-product.mdx create mode 100644 src/content/blogs/arm-freescale-ibm-samsung-st-ericsson-and-texas-instruments-form-new-company-to-speed-the-rollout-of-linux-based-devices.mdx create mode 100644 src/content/blogs/arm-linux-developers-25-companies-collaborate-linaro-connect-event-cambridge-uk.mdx create mode 100644 src/content/blogs/arm-trustzone-qemu.mdx create mode 100644 src/content/blogs/bitmain-joins-96boards.mdx create mode 100644 src/content/blogs/bof-device-tree-secure-firmware-bud17-313.mdx create mode 100644 src/content/blogs/ceo-george-grey-opens-linaro-connect-europe-2013-lce13-dublin-ireland.mdx create mode 100644 src/content/blogs/converting-code-implementing-suspend-blockers.mdx create mode 100644 src/content/blogs/coresight-perf-and-the-opencsd-library.mdx create mode 100644 src/content/blogs/debugging-arm-kernels-using-nmifiq.mdx create mode 100644 src/content/blogs/demo-friday-linaro-connect-q1-12-show-latest-linux-developments-arm.mdx create mode 100644 src/content/blogs/energy-aware-scheduling-eas-progress-update.mdx create mode 100644 src/content/blogs/first-systems-software-company-acadine-joins-linaro-mobile-group.mdx create mode 100644 src/content/blogs/fujitsu-semiconductor-joins-linaro.mdx create mode 100644 src/content/blogs/google-atap-joins-linaro-mobile-group-to-extend-collaboration-in-project-ara.mdx create mode 100644 src/content/blogs/google-becomes-club-member-linaro.mdx create mode 100644 src/content/blogs/hisilicon-joins-linaro-as-core-member.mdx create mode 100644 src/content/blogs/hoperun-joins-linaro-96boards-steering-committee.mdx create mode 100644 src/content/blogs/hxt-semiconductor-joins-linaro-accelerate-advanced-server-development-arm.mdx create mode 100644 src/content/blogs/industry-leaders-collaborate-to-accelerate-software-ecosystem-for-arm-servers-and-join-linaro.mdx create mode 100644 src/content/blogs/industry-leaders-form-autoware-foundation-to-accelerate-collaboration-in-autonomous-driving.mdx create mode 100644 src/content/blogs/introducing-devicetree-org.mdx create mode 100644 src/content/blogs/is-linaro-a-distribution.mdx create mode 100644 src/content/blogs/keynote-speakers-lined-up-for-linaro-connect-sfo15.mdx create mode 100644 src/content/blogs/kprobes-event-tracing-armv8.mdx create mode 100644 
src/content/blogs/kvm-pciemsi-passthrough-armarm64.mdx create mode 100644 src/content/blogs/latest-linaro-gcc-toolchain-release-supports-full-range-arm-cortex-processors.mdx create mode 100644 src/content/blogs/lava-fundamentals.mdx create mode 100644 src/content/blogs/lava-master-images.mdx create mode 100644 src/content/blogs/leaders-digital-home-solutions-collaborate-linaro-arm-linux-platforms.mdx create mode 100644 src/content/blogs/leading-china-smartphone-innovator-meizu-becomes-first-oem-to-join-linaro-mobile-group.mdx create mode 100644 src/content/blogs/lg-electronics-joins-linaro.mdx create mode 100644 src/content/blogs/linaro-1-contributor-linux-kernel-4-9-release.mdx create mode 100644 src/content/blogs/linaro-14-04-release-now-available-download.mdx create mode 100644 src/content/blogs/linaro-16-04-release-available-for-download-2.mdx create mode 100644 src/content/blogs/linaro-and-distributions.mdx create mode 100644 src/content/blogs/linaro-and-microsoft-collaborate-on-secure-media-solutions-for-arm-based-socs.mdx create mode 100644 src/content/blogs/linaro-announces-96boards-initiative-accelerate-arm-software-development.mdx create mode 100644 src/content/blogs/linaro-announces-actions-technology-founding-member-linaro-community-boards-group.mdx create mode 100644 src/content/blogs/linaro-announces-alibaba-group-as-latest-member.mdx create mode 100644 src/content/blogs/linaro-announces-allwinner-technology-as-a-founding-member-of-the-new-linaro-digital-home-group.mdx create mode 100644 src/content/blogs/linaro-announces-arm-based-developer-cloud-2.mdx create mode 100644 src/content/blogs/linaro-announces-broadcom-new-member.mdx create mode 100644 src/content/blogs/linaro-announces-first-development-board-compliant-96boards-iot-edition-specification.mdx create mode 100644 src/content/blogs/linaro-announces-first-development-board-compliant-96boards-tv-platform-specification.mdx create mode 100644 src/content/blogs/linaro-announces-first-lts-monarch-release-opendataplane.mdx create mode 100644 src/content/blogs/linaro-announces-fujitsus-collaboration-accelerate-high-performance-computing-arm.mdx create mode 100644 src/content/blogs/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx create mode 100644 src/content/blogs/linaro-announces-keynote-speakers-linaro-connect-usa-2013-event-santa-clara.mdx create mode 100644 src/content/blogs/linaro-announces-keynote-speakers-linaro-connect-usa-2014.mdx create mode 100644 src/content/blogs/linaro-announces-latest-96boards-product-aosp-development.mdx create mode 100644 src/content/blogs/linaro-announces-launch-of-machine-intelligence-initiative.mdx create mode 100644 src/content/blogs/linaro-announces-lemaker-as-a-member-of-the-linaro-community-boards-group.mdx create mode 100644 src/content/blogs/linaro-announces-lite-collaborative-software-engineering-internet-things-iot.mdx create mode 100644 src/content/blogs/linaro-announces-marvell-founding-member-linaro-community-boards-group.mdx create mode 100644 src/content/blogs/linaro-announces-mediatek-member.mdx create mode 100644 src/content/blogs/linaro-announces-opendataplane-tigermoth.mdx create mode 100644 src/content/blogs/linaro-announces-qualcomm-as-the-latest-industry-leader-to-become-a-member.mdx create mode 100644 src/content/blogs/linaro-announces-software-reference-platform-arm-servers.mdx create mode 100644 src/content/blogs/linaro-announces-spreadtrum-communications-latest-club-member.mdx create mode 100644 
src/content/blogs/linaro-announces-support-for-96boards-hikey-in-aosp.mdx create mode 100644 src/content/blogs/linaro-announces-zte-latest-club-member.mdx create mode 100644 src/content/blogs/linaro-appoints-guy-berruyer-chairman.mdx create mode 100644 src/content/blogs/linaro-appoints-jill-guo-to-lead-greater-china-operations.mdx create mode 100644 src/content/blogs/linaro-appoints-mark-orvek-post-vp-engineering.mdx create mode 100644 src/content/blogs/linaro-at-elc-2011.mdx create mode 100644 src/content/blogs/linaro-board-minutes-september-2010.mdx create mode 100644 src/content/blogs/linaro-brings-open-source-test-platform-open-compute-project.mdx create mode 100644 src/content/blogs/linaro-ceo-george-grey-speak-elc-2013.mdx create mode 100644 src/content/blogs/linaro-completes-first-year-demonstrations-linaro-evaluation-builds-android-ubuntu-introduction-new-partner-program.mdx create mode 100644 src/content/blogs/linaro-connect-europe-2013-lce13-host-first-demonstration-kvm-appliedmicros-arm-64c2adbit-hardware.mdx create mode 100644 src/content/blogs/linaro-enables-wider-portability-high-speed-networking-applications-release-opendataplane-v1-0.mdx create mode 100644 src/content/blogs/linaro-forms-security-working-group.mdx create mode 100644 src/content/blogs/linaro-gains-momentum-demonstrates-progress-accelerating-open-source-development.mdx create mode 100644 src/content/blogs/linaro-joins-the-industrial-internet-consortium.mdx create mode 100644 src/content/blogs/linaro-launches-96boards-ai-platform.mdx create mode 100644 src/content/blogs/linaro-launches-android-build-portal-engineering-group-pages.mdx create mode 100644 src/content/blogs/linaro-launches-opendataplane-odp-project-deliver-open-source-cross-platform-interoperability-networking-platforms.mdx create mode 100644 src/content/blogs/linaro-makes-snowball-shine.mdx create mode 100644 src/content/blogs/linaro-names-george-grey-ceo.mdx create mode 100644 src/content/blogs/linaro-participate-open-compute-summit.mdx create mode 100644 src/content/blogs/linaro-partners-samsung-ecosystem-deliver-exciting-new-low-cost-board-origen-open-source-developers.mdx create mode 100644 src/content/blogs/linaro-q1-2011-update.mdx create mode 100644 src/content/blogs/linaro-q3-update.mdx create mode 100644 src/content/blogs/linaro-updates-schedule-list-keynote-speakers-linaro-connect-las-vegas-2016.mdx create mode 100644 src/content/blogs/meltdown-spectre.mdx create mode 100644 src/content/blogs/networking-leaders-collaborate-to-maximize-choice-performance-and-power-efficiency.mdx create mode 100644 src/content/blogs/on-the-performance-of-arm-virtualization.mdx create mode 100644 src/content/blogs/op-tee-open-source-security-mass-market.mdx create mode 100644 src/content/blogs/opencsd-operation-use-library.mdx create mode 100644 src/content/blogs/qualcomm-innovation-center-becomes-core-member-of-linaro.mdx create mode 100644 src/content/blogs/rapidly-growing-chinese-mobile-phone-maker-tinno-mobile-joins-linaro-mobile-group.mdx create mode 100644 src/content/blogs/running-64bit-android-l-qemu.mdx create mode 100644 src/content/blogs/sandia-national-laboratories-joins-the-linaro-hpc-special-interest-group.mdx create mode 100644 src/content/blogs/softbank-joins-96boards-steering-committee.mdx create mode 100644 src/content/blogs/software-leaders-advise-linaro.mdx create mode 100644 src/content/blogs/stm-and-its-usage.mdx create mode 100644 src/content/blogs/suspend-to-idle.mdx create mode 100644 src/content/blogs/sve-in-qemu-linux-user.mdx 
create mode 100644 src/content/blogs/system-on-module-specifications~.mdx create mode 100644 src/content/blogs/tee-development-with-no-hardware-is-that-possible.mdx create mode 100644 src/content/blogs/testing-a-trusted-execution-environment.mdx create mode 100644 src/content/blogs/the-need-for-linaro.mdx create mode 100644 src/content/blogs/thundersoft-joins-linaro-96boards-manufacturing-partner-steering-committee-member.mdx create mode 100644 src/content/blogs/tier-IV-joins-96boards-steering-committee.mdx create mode 100644 src/content/blogs/tricks-for-debugging-qemu-rr.mdx create mode 100644 src/content/blogs/u-boot-on-arm32-aarch64-and-beyond.mdx create mode 100644 src/content/blogs/update-android-kernel-tools.mdx create mode 100644 src/content/blogs/video-plays-key-role-expanding-linaros-community-building-future-linux-arm.mdx create mode 100644 src/content/blogs/view-linaro-10-11-release-webinar.mdx create mode 100644 src/content/blogs/watch-announcement-linaro-computex.mdx create mode 100644 src/content/blogs/whats-new-qemu-2-9.mdx create mode 100644 src/content/blogs/when-will-uefi-and-acpi-be-ready-on-arm.mdx create mode 100644 src/content/blogs/xilinx-joins-linaro-iot-embedded-group.mdx diff --git a/src/components/article/ArticleContent.astro b/src/components/article/ArticleContent.astro index bd449cd..05360f3 100644 --- a/src/components/article/ArticleContent.astro +++ b/src/components/article/ArticleContent.astro @@ -1,7 +1,7 @@ --- import type { AstroComponentFactory } from "astro/runtime/server/index.js"; import CloudinaryImg from "../cloudinary/CloudinaryImg.astro"; -type Props = { Content: AstroComponentFactory; image: string }; +type Props = { Content: AstroComponentFactory; image?: string }; const { Content, image } = Astro.props; --- @@ -9,15 +9,19 @@ const { Content, image } = Astro.props;
- + { + image && ( + + ) + }
+ Ard Biesheuvel looks into accelerated Advanced Encryption Standard (AES) for + the ARM64 Linux Kernel. Read about his findings here! +related: [] + +--- + +![lightbox\_disabled=True Core Dump Banner url=https://www.google.com](/linaro-website/images/blog/core-dump) + +The Armv8 architecture extends the AArch64 and AArch32 instruction sets with dedicated instructions for AES encryption, SHA-1 and SHA-256 cryptographic hashing, and 64×64 to 128 polynomial multiplication, and implementations of the various algorithms that use these instructions have been added to the Arm and arm64 ports of the Linux kernel over the past couple of years. Given that my main focus is on enterprise class systems, which typically use high end SoCs, I have never felt the urge to spend too much time on accelerated implementations for systems that lack these optional instructions (although I did contribute a plain NEON version of AES in ECB/CBC/CTR/XTS modes back in 2013). Until recently, that is, when I received a Raspberry Pi 3 from my esteemed colleague Joakim Bech, the tech lead of the Linaro Security Working Group. This system is built around a Broadcom SoC containing 4 Cortex-A53 cores that lack the Armv8 Crypto Extensions, and as it turns out, its AES performance was dreadful. + +## AES primer + +The Advanced Encryption Standard (AES) is a variant of the Rijndael cipher with a fixed block size of 16 bytes, and supports key sizes of 16, 24 and 32 bytes, referred to as AES-128, AES-192 and AES-256, respectively. It consists of a sequence of rounds (10, 12, or 14 for the respective key sizes) that operate on a state that can be expressed in matrix notation as follows: + +![Blog Pic 1](/linaro-website/images/blog/blog-pic-1) + +where each element represents one byte, in column major order (i.e., the elements are assigned from the input in the order a0, a1, a2, a3, b0, b1, etc) + +Each round consists of a sequence of operations performed on the state, called AddRoundKey, SubBytes, ShiftRows and MixColumns. All rounds are identical, except for the last one, which omits the MixColumns operation, and performs a final AddRoundKey operation instead. + +## AddRoundKey + +AES defines a key schedule generation algorithm, which turns the input key into a key schedule consisting of 11, 13 or 15 *round keys* (depending on key size), of 16 bytes each. The AddRoundKey operation simply xor’s the round key of the current round with the AES state, i.e., + +![Blog Pic 2](/linaro-website/images/blog/blog-pic-2) + +where *rkN* refers to byte N of the round key of the current round. + +## SubBytes + +The SubBytes operation is a byte wise substitution, using one of two S-boxes defined by AES, one for encryption and one for decryption. It simply maps each possible 8-bit value onto another 8-bit value, like below + +![Blog Pic 3](/linaro-website/images/blog/blog-pic-3) + +## ShiftRows + +The ShiftRows operation is a transposition step, where all rows of the state except the first one are shifted left or right (for encryption or decryption, respectively), by 1, 2 or 3 positions (depending on the row). For encryption, it looks like this: + +![Blog Pic 4](/linaro-website/images/blog/blog-pic-4) + +## MixColumns + +The MixColumns operation is also essentially a transposition step, but in a somewhat more complicated manner. It involves the following matrix multiplication, which is carried out in GF(2^8) using the characteristic polynomial 0x11b. 
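+
+As a quick aside (this snippet is only an illustration of the field arithmetic and is not taken from the kernel code discussed below), multiplication by 2 in GF(2^8) with the 0x11b polynomial, usually called *xtime*, boils down to a shift and a conditional xor with 0x1b, and multiplication by 3 is then simply `xtime(b) ^ b`:
+
+```c
+/* Sketch only: GF(2^8) multiplication by 2 using the AES polynomial 0x11b. */
+static unsigned char xtime(unsigned char b)
+{
+    return (b << 1) ^ ((b & 0x80) ? 0x1b : 0x00);
+}
+```
+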
(An excellent treatment of Galois fields can be found [here.](https://engineering.purdue.edu/kak/compsec/NewLectures/Lecture7.pdf))
+
+![Blog Pic 5](/linaro-website/images/blog/blog-pic-5)
+
+## Table based AES
+
+The MixColumns operation is computationally costly when executed sequentially, so it is typically implemented using lookup tables when coded in C. This turns the operation from a transposition into a substitution, which means it can be merged with the SubBytes operation. Even the ShiftRows operation can be folded in as well, resulting in the following transformation:
+
+![Blog Pic 6](/linaro-website/images/blog/blog-pic-6)
+
+The generic AES implementation in the Linux kernel implements this by using 4 lookup tables of 256 32-bit words each, where each of those tables corresponds with a column in the matrix on the left, and each element N contains the product of that column with the vector `{ sub(N) }`. (A separate set of 4 lookup tables based on the identity matrix is used in the last round, since it omits the MixColumns operation.)
+
+The combined SubBytes/ShiftRows/MixColumns encryption operation can now be summarized as
+
+![Blog Pic 7](/linaro-website/images/blog/blog-pic-7)
+
+where tbIN refers to each of the lookup tables, (+) refers to exclusive-or, and the AES state columns are represented using 32-bit words.
+
+Note that lookup table based AES is sensitive to cache timing attacks, due to the fact that the memory access pattern during the first round is strongly correlated with the key xor’ed with the plaintext, allowing an attacker to discover key bits if they can observe the cache latencies of the memory accesses.
+
+Please refer to [this link](https://engineering.purdue.edu/kak/compsec/NewLectures/Lecture8.pdf) for more information about the AES algorithm.
+
+## Scalar AES for arm64
+
+The first observation one can make when looking at the structure of the lookup tables is that the 4 tables are identical under rotation of each element by a constant. Since rotations are cheap on arm64, it makes sense to use only a single table, and derive the other values by rotation. Note that this does not reduce the number of lookups performed, but it does reduce the D-cache footprint by 75%.
+
+So for the v4.11 release of the Linux kernel, a [scalar implementation of AES](http://git.kernel.org/cgit/linux/kernel/git/ardb/linux.git/tree/arch/arm64/crypto/aes-cipher-core.S?h=crypto-arm-v4.11) has been queued for arm64 that uses just 4 of the 16 lookup tables from the generic driver. On the Raspberry Pi 3, this code manages 31.8 cycles per byte (down from 34.5 cycles per byte for the generic code). However, this is still a far cry from the 12.9 cycles per byte measured on Cortex-A57 (down from 18.0 cycles per byte), so perhaps we can do better using the NEON. (Note that the dedicated AES instructions manage 0.9 cycles per byte on recent Cortex-A57 versions.)
+
+## Accelerated AES using the NEON
+
+The AArch64 version of the NEON instruction set has one huge advantage over other SIMD implementations: it has 32 registers, each 128 bits wide. (Other SIMD ISAs typically have 16 such registers). This means we can load the entire AES S-box (256 bytes) into 16 SIMD registers, and still have plenty of registers left to perform the actual computation, where the tbl/tbx NEON instructions can be used to perform the S-box substitutions on all bytes of the AES state in parallel.
+
+This does imply that we will not be able to implement the MixColumns operation using table lookups, and instead, we will need to perform the matrix multiplication in GF(2^8) explicitly. Fortunately, this is not as complicated as it sounds: with some shifting, masking and xor’ing, and using a table lookup (using a permute vector in v14) to perform the 32-bit rotation, we can perform the entire matrix multiplication in 9 NEON instructions. The SubBytes operation takes another 8 instructions, since we need to split the 256 byte S-box lookup into 4 separate tbl/tbx instructions. This gives us the following sequence for a single inner round of encryption, where the input AES state is in register v0. (See below for a breakdown of the MixColumns transformation)
+
+![Blog Pic 8](/linaro-website/images/blog/blog-pic-8)
+
+Looking at the *instruction* count, one would expect the performance of this algorithm to be around 15 cycles per byte when interleaved 2x or 4x (i.e., the code above, but operating on 2 or 4 AES states in parallel, to eliminate data dependencies between adjacent instructions). However, on the Raspberry Pi 3, this code manages only 22.0 cycles per byte, which is still a huge improvement over the scalar code, but not as fast as we had hoped. This is due to the micro-architectural properties of the tbl/tbx instructions, which take 4 cycles to complete on the Cortex-A53 when using the 4 register variant. And indeed, if we base the estimation on the *cycle* count, by taking 4 cycles for each such tbl/tbx instruction, and 1 cycle for all other instructions, we get the more realistic number of 21.25 cycles per byte.
+
+As a bonus, this code is not vulnerable to cache timing attacks, given that the memory access patterns are not correlated with the input data or the key.
+
+This [code](http://git.kernel.org/cgit/linux/kernel/git/ardb/linux.git/tree/arch/arm64/crypto/aes-neon.S?h=crypto-arm-v4.11) has been part of the arm64 Linux kernel since 2013, but some improvements to it have been queued for v4.11 as well.
+
+## Bit sliced AES using the NEON
+
+The AES S-box is not an arbitrary bijective mapping; it has a carefully chosen structure, based again on finite field arithmetic. So rather than performing 16 lookups each round, it is possible to *calculate* the substitution values, and one way to do this is described in the paper [Faster and Timing-Attack Resistant AES-GCM](https://eprint.iacr.org/2009/129.pdf) by Emilia Kaesper and Peter Schwabe. It is based on *bit slicing*, which is a method to make hardware algorithms suitable for implementation in software. In the AES case, this involves *bit slicing* 8 blocks of input, i.e., collecting all bits N of each of the 128 bytes of input into NEON register qN. Subsequently, a sequence of logic operations is executed on those 8 AES states in parallel, which mimics the network of logic gates in a hardware implementation of the AES S-box. While software is usually orders of magnitude slower than hardware, the fact that the operations are performed on 128 bits at a time compensates for this.
+
+An implementation of AES using bit slicing is queued for v4.11 as well, which manages 19.8 cycles per byte on the Raspberry Pi 3, which makes it the preferred option for parallelizable modes such as CTR or XTS. It is based on the Arm implementation, which I ported from OpenSSL to the kernel back in 2013, in collaboration with Andy Polyakov, who authored the Arm version of the code originally.
However, it has been modified to reuse the key schedule generation routines of the generic AES code, and to use the same expanded key schedule both for encryption and decryption, which reduces the size of the per-key data structure by 1696 bytes. + +The code can be found [here](http://git.kernel.org/cgit/linux/kernel/git/ardb/linux.git/tree/arch/arm64/crypto/aes-neonbs-core.S?h=crypto-arm-v4.11). + +## Conclusion + +For the Raspberry Pi 3 (as well as any other system using version r0p4 of the Cortex-A53), we can summarize the AES performance as follows: + +![Blog Pic 9](/linaro-website/images/blog/blog-pic-9) + +## Appendix: Breakdown of the MixColumns transform using NEON instructions + +![Blog Pic 10](/linaro-website/images/blog/blog-pic-10) diff --git a/src/content/blogs/accelerated-builds-android-ice-cream-sandwich-now-available-linaro-member-boards.mdx b/src/content/blogs/accelerated-builds-android-ice-cream-sandwich-now-available-linaro-member-boards.mdx new file mode 100644 index 0000000..364f972 --- /dev/null +++ b/src/content/blogs/accelerated-builds-android-ice-cream-sandwich-now-available-linaro-member-boards.mdx @@ -0,0 +1,25 @@ +--- +author: linaro +date: 2011-12-21T12:14:19.000Z +description: CAMBRIDGE, UK - 21 DEC 2011 +link: /news/accelerated-builds-android-ice-cream-sandwich-now-available-linaro-member-boards/ +title: Accelerated builds of Android Ice Cream Sandwich now available on Linaro + member boards +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 21 DEC 2011 + +Linaro, a not-for-profit engineering organization consolidating and optimizing open source software for the Arm architecture, today announced the availability of builds of Android Ice Cream Sandwich (ICS) supporting accelerated graphics on two of its member's low cost development boards: the Samsung Origen and ST-Ericsson Snowball boards. + +Just over a month ago, within a day of Google's [release](http://source.android.com/source/downloading.html) of the 4.0.1 ICS version of Android, Linaro showed videos of it running on the Texas Instruments (TI) [PandaBoard](http://www.youtube.com/watch?v=eaVszdsZ8aY) and shortly after that on the Freescale i.MX53 [Quick Start](http://www.youtube.com/watch?v=bjvJE5uirxE) board, the Samsung Origen board and ST-Ericsson's Snowball board. The accelerated graphics support that has been made available today makes use of the Arm Mali-400 graphics processor used by two of these boards. This graphics processor is integrated with a dual-core Arm Cortex-A9 processor: on the Samsung Origen board in Samsung's Exynos 4210 SoC, and on the ST-Ericsson Snowball board in ST-Ericsson's NovaTM A9500 SoC. Users of these boards can view videos of these latest builds on Linaro's [YouTube channel](http://www.youtube.com/user/LinaroOrg) and download the accelerated builds for the [Snowball](https://releases.linaro.org/archive/11.12/android/leb-snowball/) and [Origen](https://releases.linaro.org/archive/11.12/android/leb-origen/) boards on Linaro's [releases.linaro.org](http://releases.linaro.org/) website. + +Developers are able to create optimized Linux-based devices with the support of Linaro. For example, Linaro uses the latest GCC 4.6 toolchain to build Android, enabling Linaro's Android to outperform standard Android builds in benchmarks and real-world tasks. The 4.6 toolchain allows developers to optimize for the latest SoCs like ST-Ericsson's Nova A9500 processor and Samsung's Exynos4210, which leads to an improved user experience. 
Furthermore, the toolchain gives early access to the performance improvements Linaro has been developing in the next release of GCC, as well as the many correctness fixes identified and provided through working with the Linaro community. As an example, the Linaro 4.6 toolchain includes features to allow software to manually or automatically parallelize compute tasks across the multiple cores in the chips. + +Linaro operates openly, and these accelerated builds are the latest downloads enabling advanced product development on hardware from its member companies. Linaro's goal is to provide consolidated and optimized open source software building blocks that provide companies with a foundation on which they can rapidly build and deliver innovative, differentiated solutions. + +### Join us for Linaro Connect Q1.12 + +Linaro Connect is held every three to four months to bring the Linux on Arm community together to work on the latest system-on-chip (SoC) developments, plan new engineering efforts and hold engineering hacking sessions. These events give the Linux community an opportunity to be a part of the Linaro team and help to define the Arm tools, Linux kernels and builds of key Linux distributions including Android and Ubuntu on member SoCs. Join us for our next event February 6-10th in San Francisco, California. diff --git a/src/content/blogs/acer-joins-linaro-iot-and-embedded-group.mdx b/src/content/blogs/acer-joins-linaro-iot-and-embedded-group.mdx new file mode 100644 index 0000000..99be04a --- /dev/null +++ b/src/content/blogs/acer-joins-linaro-iot-and-embedded-group.mdx @@ -0,0 +1,30 @@ +--- +title: Acer joins Linaro IoT and Embedded Group +description: Budapest, Hungary;  6 March 2017 +image: linaro-website/images/blog/27094831048_6ecb96f52a_o +tags: + - iot-embedded + - linaro-connect + - linux-kernel +author: linaro +date: 2017-03-06T07:55:31.000Z +link: /news/acer-joins-linaro-iot-and-embedded-group/ +related: [] + +--- + +Budapest, Hungary;  6 March 2017 + +Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, today announced that Acer, one of the world’s top ICT companies, has joined the Linaro IoT and Embedded (LITE) Group. + +LITE focuses on delivering end to end open source reference software for more secure connected products, ranging from sensors and connected controllers to smart devices and gateways, for the industrial and consumer markets. Membership of LITE will give Acer a unique opportunity to work directly with other members of the global IoT ecosystem on shared engineering challenges. Acer, with its sound experience in hardware, software and cloud solutions is expected to help guide engineering decisions made by the group. + +“We are very pleased to have Acer join Linaro as a part of the LITE effort,” said George Grey, Linaro CEO. “Acer brings extensive experience in building products for the consumer and business markets. We look forward to working with Acer, and we expect that their experience will influence our work on building open source platforms for IoT end-points, gateway and embedded applications.” + +Industry interoperability of diverse, connected and secure IoT devices is a critical need to deliver on the promise of the IoT market. Today, product vendors are faced with a proliferation of choices for IoT device operating systems, security infrastructure, identification, communication, device management and cloud interfaces. 
Vendors in every part of the ecosystem are offering multiple choices and promoting competing standards. Linaro, Acer and the existing LITE members Arm, Canonical, Huawei, NXP, RDA, Red Hat, Spreadtrum, STMicroelectronics, Texas Instruments and ZTE will work to reduce fragmentation in operating systems, middleware and cloud connectivity solutions, and will deliver open source device reference platforms to enable faster time to market, improved security and lower maintenance costs for connected products. + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/adding-a-new-system-service-to-android-5-tips-and-how-to.mdx b/src/content/blogs/adding-a-new-system-service-to-android-5-tips-and-how-to.mdx new file mode 100644 index 0000000..e19d039 --- /dev/null +++ b/src/content/blogs/adding-a-new-system-service-to-android-5-tips-and-how-to.mdx @@ -0,0 +1,559 @@ +--- +excerpt: "This article explains how to add a new service and associated + application APIs to Android Lollipop 5 " +author: jacopo-mondi +description: > + Jacopo Mondi provides tips and explains how to add a new service and + associated application APIs to Android Lollipop 5. Click to read! +date: 2015-07-22T19:22:08.000Z +comments: false +title: How To Add A New System Service To Android 5 +tags: + - linux-kernel + - android +link: /blog/adding-a-new-system-service-to-android-5-tips-and-how-to/ +image: linaro-website/images/blog/Client_Devices_banner_pic +related: [] + +--- + +### **Intro:** + +An Android Service is a system component which ensures a longer-running operation can operate while not interacting with the user. Android has two types of services: bound and unbound. The unbound service runs in the background for an unlimited time while the bound service will continue to work until the activity which started the service comes to an end. + +In this article, we look at how to add a new service and associated application APIs to Android Lollipop 5. + +Starting from a stub HAL object, we’ll tie Java application APIs to low level operations, exploring each layer of Android internal components, in which is quite easy to get lost due to their depth, complexity and lack of general documentation. + +I’m going to call this project *joffee* for no particular reason (Java Coffee??) and since I need a name to append to the github repository where this code will be hosted… well, I wasn’t able to do better than this. 
+
+Code for this example will be hosted in the following github repositories:
+
+*framework/base --* [https://github.com/tswindell/framework\_base\_joffee](https://github.com/tswindell/framework_base_joffee)
+
+*HAL --* [https://github.com/lightydo/hardware\_joffee](https://github.com/lightydo/hardware_joffee)
+
+### **Pre:**
+
+A general knowledge of Android system layering is required to fully understand the following article. Knowing what an HAL object is, and the role of JNI and the Binder in an Android system, is a mandatory prerequisite.
+
+The official Android documentation is light on details on this side, but many resources are available on the web when searching for the above keywords. The best printed resource on this is still *[“Embedded Android”](http://shop.oreilly.com/product/0636920037132.do)* by K. Yaghmour, which details many aspects of Android system internals and provides links to other useful resources.
+
+The official Android documentation is useful as well, specifically for what concerns higher level concepts; some keywords you may be interested in looking for on the web are, in no particular order: parcelable types, AIDL, Android remote services, Message Handlers and Loopers.
+
+A working knowledge of Android application development, SDK components, developer APIs and the Android filesystem, while not mandatory, is beneficial in order to expand the proposed examples and make something useful out of them.
+
+### **HW:**
+
+Since we are going to present a running code example along with the theoretical explanation of what is going on under the hood, a hardware platform to test it on is of course needed.
+
+We are using for this example the *[NVidia Jetson Board](https://developer.nvidia.com/embedded-computing)*, a quad-core Cortex-A15 development board on which we are going to run Android 5.1. The Android binaries and other useful information, such as how to flash and boot the board, can be found on the [projectara wiki page](https://github.com/projectara/Android-wiki/wiki/Build-and-Boot-Instructions-for-Jetson-reference-platform), for which this board has been used as a development platform.
+
+Follow the provided instructions to get a running Android installation on your board. If you have another development platform available, it should work as well, since there is no direct hardware interfacing involved in this example (please note that different releases of Android can differ in some internal details; this example refers to Android 5.1, as said above).
+
+### **The HAL:**
+
+We are now going to develop and deploy a very simple HAL object with a single function that prints out “Hello Android!”, due to my lack of imagination in finding something more useful to do.
+
+Usually, HAL functions, instead of simply printing out a string, interface with the underlying kernel, typically through sysfs attributes (a short sketch of what that can look like follows below). You can find a lot on this on the web!
+
+Also, “Appendix A” of *Embedded Android* provides an in-depth analysis of HAL structure and components; read this first if you have problems understanding the following parts!
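+
+To give an idea of what a “real” HAL function would do in place of our print-out, here is a minimal sketch of a function driving the kernel through sysfs. This is purely illustrative and not part of the joffee code; the sysfs path is made up:
+
+```c
+/* Hypothetical example: a HAL function toggling a LED through a sysfs attribute.
+ * The path below is invented for illustration purposes. */
+#include <stdio.h>
+
+static int example_set_led(int on)
+{
+    FILE *f = fopen("/sys/class/leds/example:green/brightness", "w");
+
+    if (!f)
+        return -1;
+
+    fprintf(f, "%d", on ? 255 : 0);
+    fclose(f);
+    return 0;
+}
+```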
+ +Even if you are expected to know what a HAL object is and what it does, it is interesting to spend a few word on HAL loading mechanism, and how you should make your HAL object available to the rest of Android system, but first, lets’ take a look at the three components we need here: + +*Android.mk --* [https://github.com/lightydo/hardware\_joffee/blob/master/Android.mk](https://github.com/lightydo/hardware_joffee/blob/master/Android.mk) + +```makefile +# Copyright (C) 2012 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := \ + joffee.cpp + +LOCAL_MODULE_RELATIVE_PATH := hw + +LOCAL_MODULE := joffee.$(TARGET_BOARD_PLATFORM) + +LOCAL_MODULE_TAGS := optional + +include $(BUILD_SHARED_LIBRARY) +``` + +The make target for our HAL is pretty simple; it instructs the build system on where to put the resulting .so object (*system/lib/hw)* and that we want to append the target hardware name to the library name (*joffee.tegra.so* in our case). Everything else is pretty straightforward! + +*joffee.h --* [https://github.com/lightydo/hardware\_joffee/blob/master/joffee.h](https://github.com/lightydo/hardware_joffee/blob/master/joffee.h) + +```cpp +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __ANDROID_JOFFEE_HW_INTERFACE__ +#define __ANDROID_JOFFEE_HW_INTERFACE__ + +#include +#include +#include + +#include + +__BEGIN_DECLS +#define JOFFEE_HARDWARE_MODULE_ID "joffee" +/** + * The joffee device description structure; + * First field must be the hw_device_t field; + * Other fields can be function pointers and othe exported fields + */ +struct joffee_device_t { + /* Will be used in HAL open method */ + struct hw_device_t common; + + /* Pointers to your HAL functions */ + int (*joffee_function)(void); +}; +__END_DECLS +#endif //__ANDROID_JOFFEE_HW_INTERFACE__ +``` + +The header represents the “contract” between the JNI/Java part of Android system with the HW specific part (the HAL). Respecting this “contract” guarantees that Android can run on every hardware for which the proper set of HALs have been implemented with little or no modifications (that’s the theory, at least). + +The header, as comments in code explain, provides a structure describing our device, which gather together the methods that have to be exposed to Android framework. 
+ +The first field of this structure (*struct joffee\_device\_t* in our example) has to be a standard Android component, a *struct hw\_device\_t* field. + +Let’s see why, in the actual HAL implementation: + +*joffee.cpp --* [https://github.com/lightydo/hardware\_joffee/blob/master/joffee.cpp](https://github.com/lightydo/hardware_joffee/blob/master/joffee.cpp) + +```c++ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** + * @file joffee.cpp + * @brief Implements Joffee's HW abstraction layer + */ +#include +#include +#include +#include +#include +#include + +#include +#define TAG "JOFFEE!" +/** + * The function(s) exported by this HAL + */ +int joffee_function_impl(){ + ALOGE(TAG, "Hello Android!!\n"); + /* Here you should interface with your HW devices... */ + + return 0; +} + +/** This is mandatory, and part of hw_device_t */ +int close_joffee(hw_device_t* hw_dev) { + //TODO + return 0; +} +/** + * A pointer to this method is stored in + * hw_module_method_t.open; + * + * Once JNI loads the hw_module_method_t symbol, it + * can call this function and "open" the HAL layer + * receiving pointers to this module's additional methods + */ + +static int open_joffee(const struct hw_module_t *module, + char const *name, struct hw_device_t **device) { + struct joffee_device_t *dev = (struct joffee_device_t *) + malloc(sizeof(*dev)); + + if (NULL == dev) { + ALOGE(TAG, "Unable to reserve memory for joffee: %s\n", + strerror(errno)); + return -ENOMEM; + } + + /* Store pointer to HAL function(s) */ + dev->joffee_function = joffee_function_impl; + + /* Initialize common fields */ + dev->common.tag = HARDWARE_DEVICE_TAG; + dev->common.version = 0; + dev->common.module = (struct hw_module_t *)module; + dev->common.close = close_joffee; + + /* Store this module's structure in the output parameter 'device' */ + /* Remember the first field of your HAL device must be an hw_device_t */ + *device = (struct hw_device_t *)dev; + return 0; +} +/* + * The Joffee HAL description + * Will be loaded using libhardware + */ +static struct hw_module_methods_t joffee_methods = +{ + .open = open_joffee, +}; + +struct hw_module_t HAL_MODULE_INFO_SYM = +{ + .tag = HARDWARE_MODULE_TAG, + .version_major = 0, + .version_minor = 1, + .id = JOFFEE_HARDWARE_MODULE_ID, + .name = "Joffee HAL", + .author = "Linaro", + .methods = &joffee_methods, +}; +``` + +This cpp (or C) module is the actual HAL implementation, where methods for accessing your hardware have to be implemented. We know that the resulting .so will be placed somewhere in *system/lib/hw,* but how is loading of this specific object performed in Android? + +The answer is at the end of the source file, where a mandatory *struct hw\_module\_t HAL\_MODULE\_INFO\_SYM* instance has to be provided. 
+
+Android features an HAL loading mechanism, implemented in the libhardware library, that walks all the possible paths where an HAL can be placed, building several different library names and trying to load symbols from them.
+
+When the proper .so object has been found, its symbols are loaded, in particular the *HAL\_MODULE\_INFO\_SYM* field, which features a *.open* method; this will be used by the framework layer to actually “open” the library and get pointers to its additional methods.
+
+The over-commented *joffee.cpp* implementation provides details about how this happens, and what you have to be careful about; specifically, putting an *hw\_device\_t* field as the first member of your HAL device structure.
+
+This neat hack realizes the *Android standard way* of loading HAL objects, and every library which wants to be used as an HAL by the system has to respect these conventions.
+
+### **The JNI Layer:**
+
+With the introduction of a JNI layer, we now move to the *frameworks/base* directory of the Android sources, where a git project (named frameworks\_base) implements the real meat of the Android system.
+
+Frameworks\_base is quite a huge project, where all the Android services, core libraries and application support libraries reside. Its organization is difficult to follow, with several layers scattered in different packages on the filesystem, and service implementations whose designs can differ greatly.
+
+Fortunately, some principles are common to all of them. Again, the web is full of books and articles which can explain more than this single article.
+
+We’ll start our new service from the bottom, that is, the connection between the C/C++ HAL layer and the Java framework implementation.
+
+Each service that communicates with an HAL needs a way to interface to native code, and the Java programming language provides a tool which allows native code to be executed from Java programs (and the other way around).
+
+The path of the JNI service directory is then -- *[frameworks/base/services/core/jni/](https://github.com/tswindell/framework_base_joffee/tree/master/services/core/jni)*
+
+where a series of cpp files is present, named with a naming scheme resembling the service packages they are loaded by.
+
+We are going to implement the *JoffeeService* service, thus our JNI file will be called *com\_android\_server\_joffeeService.cpp*
+
+[https://github.com/jmondi/framework\_base\_joffee/blob/master/services/core/jni/com\_android\_server\_joffeeService.cpp](https://github.com/tswindell/framework_base_joffee/blob/master/services/core/jni/com_android_server_joffeeService.cpp)
+
+We are exposing here two functions, one called at service startup and one that wraps the HAL method we want to use from Java.
+
+In the init method we see the HAL loading mechanism in action, as it has been described in the previous paragraph.
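+
+Stripped of JNI boilerplate and error logging, the core of that init function boils down to something like the following. This is a simplified sketch, not the exact contents of the file linked above:
+
+```cpp
+/* Simplified sketch of the HAL loading performed at service startup. */
+#include <hardware/hardware.h>
+#include "joffee.h"
+
+static struct joffee_device_t *joffee_dev;
+
+static void load_joffee_hal(void)
+{
+    const hw_module_t *module;
+    hw_device_t *device;
+
+    /* libhardware locates joffee.<platform>.so and loads its HAL_MODULE_INFO_SYM */
+    if (hw_get_module(JOFFEE_HARDWARE_MODULE_ID, &module) != 0)
+        return;
+
+    /* Call the HAL's open method to obtain our joffee_device_t */
+    if (module->methods->open(module, JOFFEE_HARDWARE_MODULE_ID, &device) != 0)
+        return;
+
+    joffee_dev = (struct joffee_device_t *)device;
+    /* joffee_dev->joffee_function() can now be called from the JNI wrapper */
+}
+```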
+
+The HAL object gets loaded; then, once we have a pointer to its *open* method, the *joffee\_device\_t* structure gets filled with pointers to the HAL functions, so we can call them following the ‘contract’ specified by the header file *joffee.h*.
+
+Of course, we need to add this new file to the Android build system and to register its method table with the global JNI *OnLoad* method; we then need to modify *[services/core/jni/Android.mk](https://github.com/tswindell/framework_base_joffee/blob/master/services/core/jni/Android.mk)* to add the new JNI source file, and *[services/core/jni/onload.cpp](https://github.com/tswindell/framework_base_joffee/blob/master/services/core/jni/onload.cpp)* to call the method table registration.
+
+***
+
+***TIP:***  The header file *joffee.h* should be placed in a directory known to the build system; HAL header files are usually placed in *hardware/libhardware/include/hardware*
+
+We can symlink our *joffee.h* there, or modify the build system *(services/core/jni/Android.mk)* in order to add our *hardware/joffee/* directory to the inclusion path flags.
+
+***
+
+Once we have added our JNI to the system, we can build the service layer, and have a library ready to be deployed on a running system:
+
+*$ mmm services*
+...
+*target Strip: libandroid\_servers (out/target/product/jetson/obj/lib/libandroid\_servers.so)*
+*Install: out/target/product/jetson/system/lib/libandroid\_servers.so*
+
+### **The Service**
+
+Services are Android libraries, usually written in Java, which provide a remote endpoint through which applications access system functionality, privileged operations, and a general abstraction of the underlying system.
+
+Services run in a privileged context compared to applications, and they perform sensitive operations on their behalf.
+
+It is common to compare Android services with Linux distribution daemons, and to some extent this comparison is acceptable. One of the main differences is that Linux daemons do not always interact with programs, which can access the filesystem and peripherals by means of dedicated libraries; in Android it is almost mandatory for applications to interface with a remote service, which performs security checks, guarantees safety for concurrent accesses and dispatches events to higher layers.
+
+This ‘strict’ policy delivers a higher degree of consistency in Android system internals, where the roles of each component are more clearly defined compared to other \*nix distributions, resulting in a more defined layered structure, where exceptions are of course allowed, but where most of the system core components are designed following the same patterns.
+
+***
+
+***TIP:*** A note on frameworks/base/ repository organization:
+
+While *frameworks/base* represents the most significant part of an Android system, and a lot of files are part of this repository, in general its organization can be divided into:
+
+*frameworks/base/services/core ->* The “right” side of the Binder.
+
+The service implementations live here; this generates services.jar.
+
+*frameworks/base/core/ ->* The “left” side of the Binder.
+
+Implements the application-facing part of an Android system, which speaks with services using interfaces; the packages implemented here compose the Android API, and are usually part of the SDK.
+
+***
+
+The most important part of a system service is thus its interface.
+
+Android revolves around the well known IPC mechanism implemented by the Binder, which provides an RPC-like system and allows *transactions* to happen between objects that know each other only by their respective interfaces, with no explicit dependencies at build time, nor (even more important) at deploy time.
+
+Binder has a long history and the web is full of articles about it and its internals, including comparisons with similar tools known by Linux developers due to their extensive presence in many distributions (d-bus and other RPC or general IPC daemons and utilities).
+
+Interfaces are defined in Android by means of a special language, called AIDL (Android Interface Definition Language), which closely resembles a traditional Java interface definition.
+
+Android provides a set of tools which generate the necessary plumbing to connect those interfaces to the Binder, and make them accessible from our code.
+
+Let’s start with *IJoffeeService.aidl*
+
+[https://github.com/jmondi/framework\_base\_joffee/blob/master/core/java/android/joffee/IJoffeeService.aidl](https://github.com/tswindell/framework_base_joffee/blob/master/core/java/android/joffee/IJoffeeService.aidl)
+
+Interfaces get defined in the application-facing part of the system, because they have to be visible to managers and applications. We have prepared a directory for our interface in
+
+*[frameworks/base/core/java/android/joffee/](https://github.com/tswindell/framework_base_joffee/tree/master/core/java/android/joffee)* and modified the [Android.mk](https://github.com/tswindell/framework_base_joffee/blob/master/Android.mk) accordingly.
+
+Once we have defined an AIDL we have to implement the “real” service which will realize the above defined interface. Services are the “right” side of the Binder and reside in *[frameworks/base/services/core/java/com/android/server](https://github.com/tswindell/framework_base_joffee/tree/master/services/core/java/com/android/server/joffee)*. Again, we have prepared a directory there to host our joffee service.
+
+Let’s start from the basics here; since Android services are all started by the main system service, they have to extend the same super class, which is, with no surprise, *SystemService*.
+
+In order to have our implementation of the above defined AIDL interface, we need to *publish* it to the Binder, and of course implement it, as an *IBinder* object.
+
+In the service constructor we proceed with registering the service:
+
+```java
+public JoffeeService(Context context) {
+    super(context);
+
+    mContext = context;
+
+    publishBinderService(context.JOFFEE_SERVICE, mService);
+}
+```
+
+***
+
+***TIP:*** There are many ways to register and publish a service; explore the SystemService class and other services to find the one that best fits your needs.
+
+***
+
+The service will of course need to talk to the JNI we have prepared, so at the end of the file we declare the prototypes of the *native* functions we want to call.
+
+At the time of service start, we also init the native layer:
+
+```java
+@Override
+public void onStart() {
+    mNativePointer = init_native();
+}
+```
+
+Now it is time to implement the AIDL interface in the *IBinder* object we have published in the class constructor.
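+
+As a reminder, the interface we are about to implement amounts to little more than a single method declaration. Roughly, the AIDL looks like this (a sketch; see the *IJoffeeService.aidl* linked above for the actual file):
+
+```java
+// Sketch of IJoffeeService.aidl; the real file lives in the repository linked above.
+package android.joffee;
+
+interface IJoffeeService {
+    void callJoffeeMethod();
+}
+```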
+
+Since the interface is trivial, the implementation will also be very simple:
+
+```java
+ private final IBinder mService = new IJoffeeService.Stub() {
+
+    public void callJoffeeMethod() {
+
+        joffeeFunction_native();
+    }
+
+ };
+```
+
+In this way, when someone from the “left” side of the Binder calls *callJoffeeMethod*, it will simply trigger the JNI layer, which will then invoke the underlying HAL.
+
+Now that we have implemented the service, we need to tweak the Android system components to start it. The Android system server is located at
+
+*frameworks/base/services/java/com/android/server/SystemServer.java*
+
+Here we start the service, as the code on github shows.
+
+***
+
+**TIP:** Pay attention here: the SystemServer path is *frameworks/base/services/java/.../* and **not** *frameworks/base/services/core/.../*
+
+Under “*core*” you will find *SystemService.java*, **not** *SystemServer.java*, which is another Android component.
+
+***
+
+Now that we have implemented the “right” side, we need to add an API to have applications interact with our service.
+
+### **The Manager**
+
+Associated with each service there is usually a so-called Manager (services are often named *\*ManagerService*). Managers provide applications with a suitable API, which becomes part of the SDK, and mediate between apps and remote services.
+
+Our manager will use the remote service’s interface, and will not do anything particularly useful. In “real” use cases, managers take care of delivering notifications, filtering intents and checking permissions. In some cases managers tie directly into JNI, when HW access is performed directly from Java (e.g. USB devices).
+
+The manager will be placed in -- *[frameworks/base/core/java/android/joffee](https://github.com/tswindell/framework_base_joffee/tree/master/core/java/android/joffee)* in the *android.joffee* package, where we put the AIDL interface of our service.
+
+The implementation is trivial, as the service exposes a single method, which we wrap in what will become part of our new system API:
+[https://github.com/jmondi/framework\_base\_joffee/blob/master/core/java/android/joffee/JoffeeManager.java](https://github.com/tswindell/framework_base_joffee/blob/master/core/java/android/joffee/JoffeeManager.java)
+
+The implementation also contains some pointers on how you can *hide* methods from appearing in the public SDK using decorators and JavaDoc; take a look at other managers to see how they use them.
+
+***
+
+**Note 1:** *The proposed implementation is trivial, and does not feature any advanced use of Binder RPC and argument marshalling/un-marshalling (or flattening/unflattening, see the Binder documentation on this).*
+
+*None of the proposed methods accept parameters or return anything.*
+
+*It is of course possible to define types which can be passed from one side of the Binder to the other; those types are said to be Parcelable types. The Android documentation provides some material on them, and you can have a look at android.hardware.input to see how they are used in manager-service communications.*
+
+**Note 2:** *What we have seen here is the definition of a service interface which is used by a manager.
The other way around is of course possible, and it is useful to implement callbacks and listeners which notify applications about some specific event.* + +*Again, have a look at input or USB managers to see how you can define and register an interface which services can call into and deliver messages to Manager or applications.* + +*** + +### **Registering service and Manager** + +Once we have implemented both managers and service, we need a way to retrieve them from application, and start calling their methods.  The default way to retrieve a manager instance is to use the *getSystemService* method, providing the right identifier. + +We need to register in the execution context our new service and our manager in order to be able to retrieve them later, and we have to do that in *[frameworks/base/core/java/android/app/ContextImpl.java](https://github.com/tswindell/framework_base_joffee/blob/master/core/java/android/app/ContextImpl.java)* + +```java +    registerService(JOFFEE_SERVICE, new ServiceFetcher() { + +        public Object createService(ContextImpl ctx) { + +            IBinder b = ServiceManager.getService(JOFFEE_SERVICE); + +            return new JoffeeManager(ctx, IJoffeeService.Stub.asInterface(b)); + +       }}); +``` + +Just adding this allows application to later retrieve an instance to our JoffeeManager. + +### **Deploy and testing** + +Now that all pieces are in place, we just need to update the system API and build the SDK, to have our new objects available to applications. + +*$ make update-api* + +*$ make sdk* + +And you can now make Android studio point to the newly built SDK (copy it somewhere, so you can avoid it gets overwritten by new builds) + +*** + +**TIP:** After building the SDK, you cannot build *frameworks/base/* alone anymore, you will get + +make: No rule to make target *out/target/product/jetson/system/framework/framework-res.apk'* + +You can overcome this with + +*$ mmma frameworks/base* + +but it takes a long time, otherwise build frameworks-res alone, then rebuild frameworks + +*$ mmm frameworks/base/core/res* +*$ mmm frameworks/base* + +*** + +Let’s now deploy all our pieces onto the real target + +```bash +adb remount + +mmm hardware/joffee/ +adb push out/target/product/jetson/system/lib/hw/joffee.tegra.so system/lib/hw/ + +mmm frameworks/base/ +adb push out/target/product/jetson/system/framework/framework.jar system/framework/ + +mmm frameworks/base/services +adb push out/target/product/jetson/system/framework/services.jar system/framework/ +adb push out/target/product/jetson/system/lib/libandroid_servers.so system/lib/ + +adb reboot +``` + +And make Android studio point to your newly build SDK (File->Project Structure -> SDK Location) to test our new API + +```bash +make update-api +make sdk +cp -r out/host/linux-x86/sdk/jetson/android-sdk_eng.jmondi_linux-x86 ~/Android/Sdk_Joffee/ +``` + +Now we can test our implementation with the simplest possible application + +```java + import android.app.Activity;import android.content.Context; + import android.joffee.JoffeeManager; + + + public class MainActivity extends Activity { + + private JoffeeManager joffeeManager; + + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + setContentView(R.layout.activitymain); + + joffeeManager = getSystemService(JOFFEE_SERVICE); + + joffeeManager.callJoffeeMethod(); + + } + + +} +``` + +If everything has gone in the right way, you should see the *“Hello Android!!”* printout on logcat! 
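+
+A quick way to check that the call really travelled all the way down to the HAL is to watch logcat while launching the activity, for example:
+
+```bash
+# Look for the HAL's printout while the test activity starts
+adb logcat | grep "Hello Android"
+```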
+
+### **Conclusions**
+
+Adding new services to Android requires a vast knowledge of many system aspects, and a lot of tweaking of existing parts, where most of the time the only resource you have is the existing code in AOSP.
+
+Android features a nicely structured and layered design, which allows it to expose a well defined and well documented set of APIs to application developers. The counterpart of all of this is the depth of its internals, and the number of “moving parts” you have to touch to include new features or modify existing ones.
diff --git a/src/content/blogs/androidization-of-linux-kernel.mdx b/src/content/blogs/androidization-of-linux-kernel.mdx
new file mode 100644
index 0000000..72fb321
--- /dev/null
+++ b/src/content/blogs/androidization-of-linux-kernel.mdx
@@ -0,0 +1,56 @@
+---
+author: vishal-bhoj
+date: 2012-03-20T15:21:41.000Z
+description: Vishal Bhoj of the Linaro Android team explains how he recently
+  "androidized" the 3.2 Linux Kernel for Vexpress-rtsm.
+keywords: Linaro, Linux on Arm, Arm, Arm SoC, Android, 3.3 Linux Kernel, Androidization
+link: /blog/android-blog/androidization-of-linux-kernel/
+tags:
+  - android
+title: Androidization of linux kernel
+related: []
+
+---
+
+I have always wondered how one should apply the Android patches onto any Linux kernel. Recently I had to do exactly that. Here is a short description of how I androidized the 3.2 Linux kernel. I have added the Android patches to the 3.2 Linux kernel for Vexpress-rtsm. Since the kernel was close to the upstream kernel, there were luckily no merge conflicts.
+
+Here is the vanilla Linux kernel to which the Android patches were added:
+
+[http://git.linaro.org/gitweb?p=people/dmart/linux-3-arm.git;a=shortlog;h=refs/heads/arm/vexpressdt-rtsm](http://git.linaro.org/gitweb?p=people/dmart/linux-3-arm.git;a=shortlog;h=refs/heads/arm/vexpressdt-rtsm)
+
+Andy Green from the Landing team has provided a topic branch (linaro-androidization-tracking) for the 3.2 Linux kernel:
+
+[http://git.linaro.org/gitweb?p=landing-teams/working/ti/kernel.git;a=shortlog;h=refs/heads/linaro-androidization-tracking](http://git.linaro.org/gitweb?p=landing-teams/working/ti/kernel.git;a=shortlog;h=refs/heads/linaro-androidization-tracking)
+
+The androidization process was just a 4-step process:
+
+1. Clone the Linux kernel and create a branch for androidization:
+
+   git clone http://git.linaro.org/git/people/dmart/linux-3-arm.git -b arm/vexpressdt-rtsm
+
+   git checkout -b android
+
+2. Add the remote topic branch:
+
+   git remote add androidization git://git.linaro.org/landing-teams/working/ti/kernel.git
+
+3. Fetch and rebase the kernel:
+
+   git fetch androidization
+
+   git rebase remotes/androidization/linaro-androidization-tracking
+
+4. Add the necessary configs to the board defconfig file to enable the Android components in the kernel:
+
+   CONFIG\_ASHMEM=y
+   CONFIG\_STAGING=y
+   CONFIG\_ANDROID=y
+   CONFIG\_ANDROID\_BINDER\_IPC=y
+   CONFIG\_ANDROID\_LOGGER=y
+   CONFIG\_ANDROID\_RAM\_CONSOLE=y
+   CONFIG\_ANDROID\_LOW\_MEMORY\_KILLER=y
+
+Additionally I had to set "CONFIG\_VMSPLIT\_3G=y" for Android to boot on [vexpress RTSM/Fastmodel](https://www.arm.com/products/development-tools/simulation/fast-models).
+
+The androidization patches are usually provided by Google but were not available for the 3.2 kernel. For people working on the 3.3 kernel, androidization patches are available from Google at [https://android.googlesource.com/kernel/common.git](https://android.googlesource.com/kernel/common.git) for the Android-3.3 branch.
diff --git a/src/content/blogs/aosp-on-64-bit.mdx b/src/content/blogs/aosp-on-64-bit.mdx new file mode 100644 index 0000000..b517737 --- /dev/null +++ b/src/content/blogs/aosp-on-64-bit.mdx @@ -0,0 +1,66 @@ +--- +excerpt: Following the recent announcement of the Android™ L Developer Preview, + Linaro, the collaborative engineering organization developing open source + software for the Arm® architecture, today announced that a build of the + Android Open Source Project (AOSP) to the Armv8-A architecture has been made + available as part of the Linaro 14.06 release. This build has been tested on + an Armv8-A 64-bit hardware development platform, code-named “Juno”, available + from Arm for lead and ecosystem partners. +title: Linaro announces Android Open Source Project build for Armv8-A + Architecture is ready and running on a 64-bit multi-core SoC +description: In this article, Linaro announces Android Open Source Project build + for Armv8-A Architecture is ready & running on 64-bit multi-core SoC. Read + more here. +image: linaro-website/images/blog/Client_Devices_banner_pic +author: steve-taylor +date: 2014-07-02T11:00:56.000Z +tags: + - android +link: /news/aosp-on-64-bit/ +related: [] + +--- + +## Armv8-A architecture enablement to accelerate with silicon and software availability + +CAMBRIDGE, UK; 2 JULY 2014 + +Following the recent announcement of the Android™ L Developer Preview, Linaro, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that a build of the Android Open Source Project (AOSP) to the Armv8-A architecture has been made available as part of the Linaro 14.06 release. This build has been tested on an Armv8-A 64-bit hardware development platform, code-named “Juno”, available from Arm for lead and ecosystem partners. + +The Linaro Armv8-A reference software stack combined with the Arm Development Platform (ADP) provides the Arm ecosystem with a foundation to accelerate Android availability on 64-bit silicon. This announcement is the culmination of a broad architecture enablement program carried out by Linaro, Arm and the Arm partnership. Arm partners now have access to a broad range of supporting material including the Armv8 Fast Models, open source toolchain from Linaro and supporting documentation. + +“The Arm ecosystem is rapidly preparing for the benefits a 64-bit Arm architecture will bring to devices starting this year,” said James McNiven, general manager of systems and software at Arm. “Our collaboration with Linaro will enable our partners to create 64-bit devices that will drive the best next-generation mobile experience on Android operating systems,while also providing full compatibility with today’s 32-bit mobile ecosystem that is optimized on Arm-v7A.” + +The Linaro 14.06 release includes a 64-bit primary/32-bit secondary binary image and source code based on the Linaro Stable Kernel (LSK) 3.10 for Android, compiled with GCC 4.9 and tested on both the Armv8-A 64-bit hardware platform and Armv8-A Fast Models. The AOSP is based on the Open Master snapshot downloaded on June 1st with HDMI drivers loaded as modules. The release is built with the Android runtime (ART) compiler as the default virtual machine. Peripheral and advanced power management support plus several accelerations will not be available in this release, but will follow in future releases on a monthly cadence. 
+ +“We have been using Arm Fast Models to develop for AOSP for a long time and it is testament to the quality of our collaborative engineering that we have delivered this release running on the Armv8-A hardware platform so quickly,” said George Grey, Linaro CEO. “We look forward to working closely with our members to enable them to deliver next generation Android solutions rapidly to the market.” + +The Armv8-A hardware development platform includes an SoC with a quad-core Arm Cortex®-A53 CPU and dual-core Arm Cortex-A57 CPU in an Arm big.LITTLE™ processing configuration with a quad-core Arm Mali™-T624 GPU linked via Arm CoreLink™ system IP and implemented using Arm Artisan® physical IP. The development platform with its Armv8-A software stack provides Arm software and silicon partners with a common foundation to accelerate their Armv8-A software development. Further information about this platform is available from the Arm website here: [www.arm.com/juno](https://www.arm.com/products/development-tools/development-boards/juno-arm-dev-platform). + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a collaborative engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: its goal is to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The majority of Linaro’s engineering work is open to all online. To find out more, please visit [](/). + +**About Arm** + +Arm is at the heart of the world's most advanced digital products. Our technology enables the creation of new markets and transformation of industries and society. We design scalable, energy efficient-processors and related technologies to deliver the intelligence in applications ranging from sensors to servers, including smartphones, tablets, enterprise infrastructure and the Internet of Things. + +Our innovative technology is licensed by Arm Partners who have shipped more than 50 billion Systems on Chip (SoCs) containing our intellectual property since the company began in 1990. Together with our Connected Community, we are breaking down barriers to innovation for developers, designers and engineers, ensuring a fast, reliable route to market for leading electronics companies. Learn more and join the conversation at http://community.arm.com. + +## **Partner Testimonials** + +**Allwinner Technology** + +“We are working closely with Linaro to take our leadership experience in Android tablets into the digital home and beyond,” said Jack Lee, Chief Marketing Officer of Allwinner. “The Linaro builds of the Android Open Source Project (AOSP) for 64-bit Armv8 platforms will be a key building block to enable us to offer a range of differentiated solutions across multiple markets.” + +*About Allwinner:* Allwinner Technology is a leading fabless design company dedicated to smart application processor SoCs and smart analog ICs. Its product line includes multi-core application processors for smart devices and smart power management ICs used by brands worldwide. 
+ +With its focus on cutting edge UHD video processing, high performance multi-core CPU/GPU integration, and ultra-low power consumption, Allwinner Technology is a mainstream solution provider for the global tablet, internet TV, smart home device, automotive in-dash device, smart power management, and mobile connected device markets. Allwinner Technology is headquartered in Zhuhai, China. See www.allwinnertech.com for more information. Follow Allwinner on Twitter @AllwinnerTech. Media contact: service@allwinnertech.com + +**Texas Instruments** + +“Linaro’s release of AOSP and OpenEmbedded builds running on Armv8 hardware and models is an important step in the development of the next generation of our SoCs for multiple markets,” said Matthew Watson, Product Line Manager, Infotainment Processors, Texas Instruments. “We look forward to working closely with Linaro to deliver Armv8-based solutions, targeting markets including networking equipment, automotive, industrial, and other markets.” + +*About Texas Instruments:* Texas Instruments Incorporated (TI) is a global semiconductor design and manufacturing company that develops analog ICs and embedded processors. By employing the world's brightest minds, TI creates innovations that shape the future of technology. TI is helping more than 100,000 customers transform the future, today. Learn more at [http://www.ti.com/](http://www.ti.com/). diff --git a/src/content/blogs/archermind-joins-96boards-launches-deci-core-armv8-product.mdx b/src/content/blogs/archermind-joins-96boards-launches-deci-core-armv8-product.mdx new file mode 100644 index 0000000..18d44bb --- /dev/null +++ b/src/content/blogs/archermind-joins-96boards-launches-deci-core-armv8-product.mdx @@ -0,0 +1,84 @@ +--- +title: ArcherMind Joins 96Boards and Launches Deca-Core Armv8 Product +description: "Cambridge, UK: 16 August 2016" +image: linaro-website/images/blog/Banner_Core_Technologies +tags: [] +author: linaro +date: 2016-08-16T15:39:55.000Z +link: /news/archermind-joins-96boards-launches-deci-core-armv8-product/ +related: [] + +--- + +Cambridge, UK: 16 August 2016 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that ArcherMind Technology (Nanjing) Co., Ltd has joined the 96Boards initiative as a Steering Committee Member and Manufacturing Partner and they are preparing the launch of their first 96Boards product. + +ArcherMind’s board will put one of the most advanced Arm-based SoCs into the hands of all types of developers - from individual hobbyists building one-off projects to commercial engineering teams developing the next generation of mass market mobile and IoT products. The MediaTek X20 (MT6797) deca-core Armv8 SoC offers four Arm Cortex-A53 cores for basic processing, four Arm Cortex-A53 cores for mid- to heavy-use cases, and two Arm Cortex-A72 cores for bursts of maximum performance along with an advanced Arm Mali-T880 MP4 graphics chip clocked to 800MHz. This enables the board to deliver unprecedented performance for a whole host of different applications. The board is available for pre-order [here](https://www.96boards.org/carbon-buy). + +“Our collaboration with MediaTek and 96Boards is designed to empower our customers,” said Bin Du, CTO of ArcherMind. “This is a unique opportunity for us to combine MediaTek’s high performance hardware technology with Linaro’s open source expertise and our own specialized software services to make a product for all types of developers.
We are committed to supporting this community and look forward to seeing the innovative solutions that they develop.” + +Founded in 2006, ArcherMind is a global, professional software service supplier specializing in software product design, code development, quality assurance, technology support, etc. The company is devoted to providing specialized world-wide software R\&D services and focusing on R\&D and consulting services in the field of mobile devices and wireless internet software. Headquartered in Nanjing, China, it also has offices in Beijing, Shanghai, Shenzhen, Wuhan, Xi’an and Guangzhou and is active in North America, Europe, Japan and Korea. + +“ArcherMind has a track record of providing platform software solutions to many leading companies and I’m excited to see them joining the 96Boards initiative,” said Yang Zhang, Director of 96Boards. “What makes this partnership particularly interesting is that it involves collaboration with another one of our members - MediaTek. This is a perfect example of what makes Linaro and 96Boards so unique - we offer a common platform upon which members can collaborate and thereby accelerate innovation.” + +**ArcherMind MediaTek X20 Board Specifications** + +*Processor* +Helio X20 64-bit deca-core with: +Two Cortex-A72 cores (2.1-2.3GHz) +Four Cortex-A53 cores (1.85GHz) +Four Cortex-A53 cores (1.4GHz) + +*Graphics processor* +Arm Mali-T880 MP4 800MHz + +*Multimedia Support* +32MP @ 24fps / 25MP @ 30 fps +WQXGA 2560×1600 60fps FHD 1920×1080 120fps +4Kx2K 30fps H.265 w/HDR + +*Memory and Storage* +2GB LPDDR3 2CH, 933MHz +8GB EMMC5.1 + +*Extended storage interface* +Support Micro SD card (SD3.0) + +*Display interface* +HDMI Full-size Type-A, supports 1080P @ 30 fps + +*Wireless* +MT6631 chip +WLAN 802.11a/b/g/n 2.4GHz and 5GHz (On-board BT and WLAN antenna) +*Bluetooth*® wireless technology 4.1 +HS compliant +GPS (with antenna connector) + +*USB interface* +One USB2.0 high speed micro B (Device mode) +Two USB2.0 high speed Type A (Host mode) + +*IO extended interface* +One 40-pin Low Speed (LS) expansion connector: UART, SPI, I2S, I2C x2, GPIO x12, DC power +One 60-pin High Speed (HS) expansion connector: 4L-MIPI DSI, USB, I2C x2, 2L+4L-MIPI CSI +One optional 16-pin analog expansion connector for stereo headset/line-out, speaker and analog line-in + +*Manual Controls* +4 Mechanical Buttons: Power/Reset/Volume Up/Down +6 LED Lights: 4 user controllable, 2 for radios (BT and WLAN activity) + +*Power supply* +DC input +8V \~ +18V + +*Operating System* +Android Marshmallow 6.0 + +*Size* +85mm × 54mm + +**About 96Boards** +96Boards is the first open specification to define a platform for the delivery of compatible low-cost, small footprint 32-bit and 64-bit Cortex-A boards from the range of Arm SoC vendors. There are currently two 96Boards specifications for low-cost Armv7-A and Armv8-A development boards: The Consumer Edition (CE), which targets the mobile, embedded and digital home segments, and the Enterprise Edition (EE), which targets the networking and server segments. To find out more, please visit [http://www.96Boards.org](https://www.96boards.org/). + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure.
Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [](). diff --git a/src/content/blogs/arm-freescale-ibm-samsung-st-ericsson-and-texas-instruments-form-new-company-to-speed-the-rollout-of-linux-based-devices.mdx b/src/content/blogs/arm-freescale-ibm-samsung-st-ericsson-and-texas-instruments-form-new-company-to-speed-the-rollout-of-linux-based-devices.mdx new file mode 100644 index 0000000..4aebc05 --- /dev/null +++ b/src/content/blogs/arm-freescale-ibm-samsung-st-ericsson-and-texas-instruments-form-new-company-to-speed-the-rollout-of-linux-based-devices.mdx @@ -0,0 +1,75 @@ +--- +author: linaro +date: 2010-06-03T10:50:19.000Z +description: CAMBRIDGE, UK - 3 JUN 2010 +link: /news/arm-freescale-ibm-samsung-st-ericsson-and-texas-instruments-form-new-company-to-speed-the-rollout-of-linux-based-devices/ +title: Arm, Freescale, IBM, Samsung, ST-Ericsson and Texas Instruments Form New + Company to Speed the Rollout of Linux-Based Devices +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 3 JUN 2010 + +## Linaro unites industry leaders to foster innovation in the Linux® community through a common foundation of tools and software + +* Arm, Freescale, IBM, Samsung, ST-Ericsson and Texas Instruments have created the not-for-profit company, Linaro, committed to providing new resources and industry alignment for open source software developers using Linux on the world’s most sophisticated semiconductor System-on-Chips (SoCs). + +* Linaro will invest resources in open source projects that can then be used by Linux-based distributions such as Android, LiMo, MeeGo, Ubuntu and webOS. + +* Linaro will provide a stable and optimized base for distributions and developers by creating new releases of optimized tools, kernel and middleware software validated for a wide range of SoCs, every six months. + +* Linaro's base of software and tools will be applicable to a wide range of markets, helping reduce time-to-market for products such as smart phones, tablets, digital televisions, automotive entertainment and enterprise equipment. + +* Linaro's first software and tools release is due out in November 2010, and will provide optimizations for the latest range of Arm® Cortex™-A family of processors. + +Arm,  Freescale Semiconductor, IBM, Samsung, ST-Ericsson and Texas Instruments Incorporated (TI), today announced the formation of Linaro, a not-for-profit open source software engineering company dedicated to enhancing open source innovation for the next wave of always-connected, always-on computing. Linaro's work will help developers and manufacturers provide consumers with more choice, more responsive devices and more diverse applications on Linux-based systems. + +Linaro aligns the expertise of industry-leading electronics companies to accelerate innovation among Linux developers on the most advanced semiconductor SoCs (System-on-Chip). 
The current wave of "always-connected, always-on" devices requires complex SoCs to achieve the performance and low power consumers demand. Linaro was formed to increase investment in open source, address the challenges in developing products for sophisticated consumer markets and provide the support of a broad array of semiconductor products from multiple companies. By providing the common foundations of tools and software for other distributions and stacks to build upon, Linaro enables greater operational efficiency for the electronics industry. + +### Background + +Traditionally, the Linux and open-source software communities focused on solving the software problems of enterprise and computing markets with a limited choice of processor platforms. The open source community is transitioning to create advanced Web-centric consumer devices using high profile open source based distributions and a wide range of high-performance, low-power Arm®-based SoCs. Linaro will make it easier and quicker to develop advanced products with these high profile distributions by creating software commonality across semiconductor SoCs, from multiple companies. + +### Consumer Benefits + +In addition to providing a focal point for open source software developers, consumers will benefit by the formation of Linaro. Linaro's outputs will accelerate the abundance of new consumer products that use Linux-based distributions such as Android, LiMo, MeeGo, Ubuntu and webOS in conjunction with advanced semiconductor SoCs to provide the new features consumers desire at the lowest possible power consumption. + +"The dramatic growth of open source software development can now be seen in internet-based, always-connected mobile and consumer products," said Tom Lantzsch, executive officer, Linaro. "Linaro will help accelerate this trend further by increasing investment on key open source projects and providing industry alignment with the community to deliver the best Linux-based products for the benefit of the consumer." + +### Linaro Software and Tools + +Linaro will work with the growing number of Linux distributions to create regular releases of optimized tools and foundation software that can be used widely by the industry, increasing compatibility across semiconductors from multiple suppliers. As a result, Linaro's resources and open source solutions will allow device manufacturers to speed up development time, improve performance and reduce engineering time spent on non-differentiating, low-level software. Linux distributions, open source and proprietary software projects will benefit from Linaro's investment, with more stable code becoming widely available as a common base for innovation. + +To further its mission, Linaro aims to unite the open source engineering resources within its member firms with the broad open source community. Linaro engineers, leveraging their extensive embedded knowledge, will contribute to a wide range of open source projects covering areas such as tools, kernel, graphics and boot code. Linaro intends to work in partnership with the Linux Foundation to align on core operating principles. + +The company's first release is planned for November 2010 and will provide performance optimizations for SoCs based on the Arm Cortex™ A processor family. + +In addition to Arm and IBM, four of the world's leading application processor companies, Freescale, Samsung, ST-Ericsson and Texas Instruments, will align open source engineering efforts within Linaro. 
+ +Linaro is a growing organization with additional partners expected to join, thus expanding the range of expertise that is brought to the open source community. Companies interested in joining are invited to discuss membership with Linaro executives. + +### Founding Member quotes + +"Arm and our partners have a long history of working with, and supporting, open source software development for complex SoCs based on the Arm architecture," said Warren East, Arm CEO. "As a founding member of Linaro, we are working together with the broader open source community to accelerate innovation for the next generation of computing, focusing on delivering a rich connected experience across the diversity of devices in our daily lives." + +"Freescale is taking a leadership position in shaping the future of consumer electronics by enabling entirely new categories of smart mobile devices," said Lisa Su, senior vice president and general manager of Freescale's Networking and Multimedia Group. "Linaro represents an important step forward in developing the ecosystem for these smart mobile devices through dramatically speeding and simplifying software development cycles and leveraging the power and strength of the open source community." + +"IBM believes that leadership with Linux solutions begins with effective collaboration in the community, and IBM's ten year history of working with the Linux community has resulted in a strong, mutually beneficial relationship," said Daniel Frye, vice president, open systems development, IBM. "IBM's ongoing collaboration has contributed to the widespread adoption of Linux throughout the data center. We are strong proponents of working with partners such as Arm to further our commitment, ensuring Embedded Linux is the leading platform for innovation in the mobile and consumer electronics markets." + +"Samsung is an industry leader in high performance, low power application processors for mobile handset and other consumer devices. We fully appreciate the significance of having an optimized Linux software foundation and tools for our Arm CPU core base products, to support our customers' needs with high quality solutions," said Yiwan Wong, vice president, System LSI marketing, Samsung Electronics. "We are pleased to join Linaro as a foundation member and work together with Arm to serve the interests of our customers." + +"Open source has become an engine for innovation in the smart phone and consumer electronics market," said Teppo Hemia, vice president, 3G Multimedia Platforms Business Unit of ST-Ericsson. "Being an active contributor in the open source community, we are excited to be one of the founding members of Linaro and expect our combined efforts to accelerate the development of Linux-based devices." + +"Linaro is intently focused on delivering critical open source components to enable developers building on Arm-based processors. An important element of that delivery is a more complete, higher quality development toolset that increases performance. In our leadership role, TI will support Linaro's efforts by leveraging our open source expertise - evidenced by our participation in Linux kernel enhancement submissions and our support of popular industry development boards," said Remi El-Ouazzane, vice president and general manager, OMAP Platform Business Unit, TI. + +### Industry quotes + +"The existence of Linaro will significantly simplify the process of making Linux-based consumer devices available to market," said Jane Silber, CEO of Canonical.
"By standardising many of the core software components, companies can focus on creating great user experiences on embedded devices through to smart phones. Canonical is delighted to participate in what will be a significant driver of the success of Linux on Arm, in the consumer electronics market." + +"We welcome the launch of Linaro as a new industry organisation with similar values and a complementary focus to LiMo Foundation," said Morgan Gillis, executive director of LiMo Foundation. "We look forward to working collaboratively with Linaro to deliver greater efficiency within the mobile Linux value system." + +"The Linux Foundation is happy to see Arm and Linaro increase their investment in Linux," said Jim Zemlin, executive director of The Linux Foundation. "We are supportive of any investment that contributes to the mainline kernel and the many upstream open source projects that make up Linux based operating systems today." + +For more information on the company, access to software and tools, and information on the community and open engineering, visit[www.linaro.org](/) diff --git a/src/content/blogs/arm-linux-developers-25-companies-collaborate-linaro-connect-event-cambridge-uk.mdx b/src/content/blogs/arm-linux-developers-25-companies-collaborate-linaro-connect-event-cambridge-uk.mdx new file mode 100644 index 0000000..6bc4b37 --- /dev/null +++ b/src/content/blogs/arm-linux-developers-25-companies-collaborate-linaro-connect-event-cambridge-uk.mdx @@ -0,0 +1,42 @@ +--- +author: linaro +date: 2011-08-23T11:13:45.000Z +description: CAMBRIDGE, UK - 23 AUG 2011 +link: /news/arm-linux-developers-25-companies-collaborate-linaro-connect-event-cambridge-uk/ +title: Arm Linux Developers from over 25 Companies Collaborate at Linaro Connect + Event in Cambridge, UK +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 23 AUG 2011 + +## First Linaro Connect Event Enhances Device Tree Support to Accelerate Development of Linux on Arm + +More than 150 Linux engineers gathered in Cambridge this month to collaborate on the development of Linux on Arm at the first quarterly Linaro™TM Connect event. + +Linaro, the not-for-profit open source software engineering company founded by Arm, IBM, TI, Samsung, Freescale and ST-Ericsson is working on consolidation and optimization of Linux on Arm SoCs. Device Tree, a mechanism for describing hardware configuration for the Linux kernel, was among the many topics covered at the recent Linaro Connect event. Members of the Arm Linux community set out to simplify porting of the Linux kernel to the diverse platforms developed by Arm silicon vendors and ODMs. By the end of the weeklong event many patches had been produced that will be queued up for the Linux 3.2 release kernel tree including: + +* Device drivers for Freescale iMX converted to use Device Tree-based discovery. +* Code developed to bridge between the Device Tree model and TI OMAP's HWMOD, used to describe complex power and clock domains. +* Initial support for Device Tree added to Samsung Exynos, Qualcomm MSM86 and Atmel AT91 boards, including a serial console described by the Device Tree. +* Initial skeleton work for Device Tree implemented for the Arm Versatile board. + +"We have seen some pretty amazing output from the Linaro Connect," said Christian Reis, Vice President of Engineering at Linaro. "Not only around the Device Tree work, but also including major progress on the Continuous Memory Allocator, DMA mapping and buffer sharing frameworks in collaboration with the Arm kernel maintainers." 
+ +"Linaro has come a long way since a year ago when I first attended a Linaro event in Prague," said Paul McKenney, distinguished engineer at IBM Linux Technology Center and IBM representative on the Linaro Technical Steering Committee. "The question in 2010 was 'can Linaro become relevant?' The answer in 2011 is obvious as you look at the large number of Linaro patches upstream in a number of projects, the number of users of Linaro's tool chain, the number of attendees, including many developers who are not Linaro assignees, as well as the level of energy in the sessions." + +Linaro Connect is a quarterly, week-long engineering event which brings together the Linaro engineering team, Linaro members, and others in the Arm Open Source community to work on the future of Linux on Arm. The events are a mixture of presentations, topic-based summits and software engineering sessions. + +The first Linaro Connect was held from 1-5 August in Cambourne, UK.  Over 150 engineers from more than 25 companies attended. "We have been delighted by the participation in Linaro Connect", said Stephen Doel, COO Linaro. "There is no other industry event that offers a deep focus on delivering Open Source engineering for Linux on Arm devices." + +Looking forward, Linaro plans to co-locate many of the quarterly Linaro Connect events with other major industry conferences. + +"We're pleased to see Linaro Connect co-locating with the Embedded Linux Conference and the Android Builders Summit," said Jennifer Cloer, director of communications and community at The Linux Foundation. "The Linux on Arm community is an important one and we look forward to fruitful collaboration between Linaro and the broader community of embedded Linux developers who attend our conferences." + +Linaro announced the dates of future Linaro Connect events: + +* October 31 - November 4, 2011 in Orlando, Florida, co-located with the Ubuntu Developer Summit +* February 6 - 10, 2012 in San Francisco, California, co-located with Embedded Linux Conference and Android Builders Summit to be held the following week +* May 7 - 11, 2012, co-located with Ubuntu Developer Summit diff --git a/src/content/blogs/arm-trustzone-qemu.mdx b/src/content/blogs/arm-trustzone-qemu.mdx new file mode 100644 index 0000000..5466a2c --- /dev/null +++ b/src/content/blogs/arm-trustzone-qemu.mdx @@ -0,0 +1,176 @@ +--- +excerpt: The blog post describes why introducing Arm TrustZone support in QEMU + is important and the accompanying benefits. The post outlines the turbulent + history behind the current development as well as an explanation of the added + features. +author: linaro +description: In this article, Linaro take a detailed look at why introducing Arm + TrustZone support in QEMU is important & the accompanying benefits. Read about + it here! +date: 2014-09-26T07:54:06.000Z +comments: false +title: Arm TrustZone in QEMU +tags: + - arm + - qemu + - security +link: /blog/core-dump/arm-trustzone-qemu/ +image: linaro-website/images/blog/Banner_Virtualization +related: [] + +--- + +Ever used an application on your smartphone or tablet that accesses security sensitive information such as banking, personal health information, or credit cards? The demand for mobile devices to do more and more is rapidly growing and includes increased security sensitive tasks. At the same time, malicious apps are also flooding mobile app stores in hopes of exploiting security holes to take advantage of unsuspecting users. 
+ +Can we rely on certain apps to protect our personal data and prevent undesired and unauthorized access? The current solution is to present users with warning dialogs when downloading applications and otherwise trust the rest of the system. This does not work for a number of reasons. First, existing protection and isolation principles may not work. Second, applications may not be implemented according to secure programming guidelines. Third, other users of devices (such as children or friends) may download malicious applications without the main user realizing it. + +The proper solution is to improve the application development ecosystem so sensitive applications also become trusted applications and have the facilities to better protect our sensitive data. In order to promote such an ecosystem, it is important for these facilities to be readily available and widespread. Only then can data sensitive applications be made available in an efficient and timely manner. + +The Arm architecture is the dominant mobile CPU architecture and already has the technology for providing such security: it’s called TrustZone. Unfortunately, developing applications for TrustZone is challenging, requires access to expensive hardware development kits, and often involves signing NDAs and custom licenses. + +### Tell me more about Arm TrustZone + +![Arm-TrustZone-Logo class=small-inline](/linaro-website/images/blog/Arm-TrustZone-Logo) + +Arm TrustZone is the term used to describe the Arm Security Extensions. Available since Armv6, the Arm Security Extensions define optional hardware security features for the Arm processor as well as other components of an Arm SoC. + +The Arm Security Extensions divide execution into separate secure and non-secure worlds on a single SoC. This division allows for strict hardware-based isolation between software executing in the normal (non-secure) world and the secure world, without the need for dedicated security hardware. Typically, a device will run its rich conventional OS, like Linux or Android, in the normal world, while running a small vendor specific secure OS and its applications in the secure world. + +The isolation between the normal and secure worlds is driven largely by an additional security state incorporated into many aspects of the architecture. A single secure state bit can determine the accessibility to certain system registers and memory as well as control where interrupts should be delivered. Similarly, devices on the bus may be configured as secure or non-secure, providing protection against undesired access. + +While the above technology exists for enabling secure compute, it is typically only available on costly and difficult to obtain development hardware. As well, the software for accessing these features is often proprietary and tightly controlled by hardware vendors. Overcoming these restrictions is key to the growth of secure computing by making the technology more generally available. QEMU is the ideal solution for addressing these limitations. + +### QEMU - Q What? + +![Qemu-logo class=small-inline](/linaro-website/images/blog/Qemu-logo) + +QEMU, short for “quick emulator”, is a very widely used open source machine emulator. QEMU is capable of emulating a variety of client architectures across a number of host architectures through the use of dynamic binary translation. In addition to being a standalone emulator, the QEMU sources are also the foundation for other emulated environments.
Most notably, the Android Emulator, which is shipped as part of the Android SDK, is based on an older stripped down version of QEMU (go [here](/blog/running-64bit-android-l-qemu/) for more details). + +QEMU supports multiple emulation modes, including full-system emulation of an entire system and its peripherals as an emulated guest machine on a given host. One example would be emulating a virtual Arm Linux system on an x86 host. Alternatively, QEMU supports user-mode emulation, which allows a single executable binary compiled for one architecture to be executed on a different host architecture. For example, executing gcc compiled for x86 on an Arm host. + +QEMU is open source and freely available, making it a cost-effective alternative to requiring actual hardware for development of secure software. Developers benefit from QEMU’s single system environment that utilizes familiar development and debug tools such as GDB. Altogether, these conveniences allow for more efficient development and debug, resulting in quicker time-to-market solutions. Derivative technology, such as the Android Emulator, also benefits from the added features when based on the upstream version of QEMU. + +### Adding Arm TrustZone to QEMU + +#### Why should QEMU be trusted? + +The primary goal of adding the security extensions support to QEMU’s Arm target is to allow for development of secure software without the need for dedicated hardware. With Arm Security Extensions support in QEMU, users could conveniently load their trusted secure world binary alongside a rich OS running in the non-secure world, allowing full interaction while debugging both environments. + +![quem-trusted class=small-inline](/linaro-website/images/blog/quem-trusted) + +Developers can use the QEMU Arm Security Extensions to develop and work with Trusted Execution Environments (TEEs) that are likely to be the primary consumers of the added functionality. Secure applications can then be developed on the added TEEs without the need for dedicated hardware. + +Linaro is currently working on running open-source TEE (OP-TEE) software on top of QEMU for two reasons. Firstly, to provide a concrete real-world use case. Secondly, to stress-test the added QEMU functionality to ensure proper operation. Linaro is already engaged in efforts to develop an open source TEE solution that will be a likely candidate. More details about the OP-TEE work can be found [here](/blog/op-tee-open-source-security-mass-market/). + +To reiterate, the addition of the Arm Security Extensions to QEMU allows for the coexistence of separate secure and non-secure software where QEMU emulates the architectural facilities that bridge the two worlds. + +#### Can QEMU be trusted? + +QEMU has made advances in supporting some of the latest Arm architectural features such as 64-bit and Armv8-A; however, it still lacks support for the Arm Security Extensions. Attempts to utilize features such as the *smc* instruction or secure registers will result in an undefined operation failure. + +Just as the Arm Security Extensions extend the Arm architecture, they can similarly extend QEMU’s functionality. QEMU’s system register management functionality must be extended to track the additional security specific system registers and system register secure banks that allow for separate configuration of the secure and non-secure worlds. Support for the added *smc* instruction and associated monitor exception mode must be added to allow software to transition between the secure and non-secure worlds.
Additionally, QEMU’s memory management functionality must be extended to allow tracking and protection of secure memory accesses across the system. Lastly, QEMU’s Arm interrupt facilities must be extended to control accessibility to the interrupt controller as well as to enable secure interrupt grouping. + +### Turbulent development history + +#### Initial development + +![Qemu - timeline](/linaro-website/images/blog/quem-timeline) + +From August 2011 to June 2013, Johannes Winter of the Graz University of Technology developed QEMU TrustZone changes in a QEMU repository on GitHub. + +Johannes’ initial changes included much of the Arm Security Extensions functionality seen in today’s latest patches. Changes included all the expected Arm Security Extension features such as secure system registers, monitor mode, the smc instruction and distinct secure world address spaces. Secure memory translation support was not included. + +In addition to the processor extensions, Johannes’ patches also included infrastructure and support for the Arm TrustZone TZC380 and BP147 peripheral controllers, virtualization register and exception support as well as extensions to GDB support for debugging secure registers. Arm GIC security extensions were not included. + +The code evolved over its two-year development period but never made it into upstream QEMU. Although considered experimental and a work-in-progress, Johannes’ work has become the foundation for ongoing emulated Arm trusted environment development. + +#### Version 1 - Samsung’s contributions + +Six months after Johannes’ final committed work, Sergey Fedorov and Svetlana Fedoseeva from Samsung submitted patches for review based on Johannes’ final changes. While the patches mostly paralleled Johannes’ final work, there were slight differences. + +The most significant of the changes to Johannes’ initial work was the redesign of the mechanism for selecting between the system register banks. Rather than promote Johannes’ explicit bank access approach, Samsung adopted an active register mechanism that would context switch the banked registers on secure state change. This approach would eventually be criticized during review for its added overhead. In addition, certain functionality from Johannes’ final work was omitted, including support for the TrustZone peripheral controllers and GDB secure register support. + +Shortly after the initial request for comments, Samsung orphaned the patches, leaving the effort unmaintained. Details on Samsung’s v1 patches can be found [here](http://lists.nongnu.org/archive/html/qemu-devel/2013-12/msg00261.html). + +#### Version 2 - Linaro gets involved + +In March of this year, Linaro began evaluating the pieces left behind by Samsung, in part due to Qualcomm’s interest in having Arm Security Extensions support in QEMU. Corrections were underway to address the prior review feedback on Samsung’s patches, with hopes of sending version 2 of the TrustZone patches out for review. The most significant effort would be addressing the secure banked system register mechanism. + +At the beginning of May of this year, Edgar Iglesias from Xilinx sent a set of patches out for review containing changes preparing for modeling of Armv8-A EL2/EL3 support in QEMU. The changes primarily included infrastructure support for extending the number of supported exception levels in AArch64. Although minimal, there was slight overlap in the naming of and method for accessing common security related resources.
+ +To Linaro’s surprise and shortly before Linaro’s version 2 patches were ready, Fabian Aggeler, a student from ETH Zürich beat Linaro to the punch and sent out for review his own follow-on to Samsung’s patches. + +It was no surprise that Fabian’s changes were similar to Linaro’s as we were both addressing the same review feedback. The primary difference in the changes was the design used for managing and addressing the secure banked system registers. After consideration and consultation within the QEMU community, the decision was made to move forward with Fabian’s approach. Ironically, the approach is very close to Johannes’ original approach. In addition, Fabian also made changes around the ongoing AArch64 changes made since Samsung’s patches. Details on Fabian’s v2 patches can be found [here](http://lists.nongnu.org/archive/html/qemu-devel/2014-05/msg02522.html). + +#### Version 3 - Linaro takes over + +Moving forward, Linaro embraced Fabian’s changes, and accepted the role of reviewing the ongoing work by both Fabian and Edgar. After receiving extensive comments on his version 2 patchset, Fabian would eventually submit version 3 for review, but with a caveat. Fabian needed to relinquish ownership of the TrustZone patches so he could concentrate on school work. Committed to seeing the TrustZone functionality in QEMU, Linaro stepped up and took over Fabian’s patches. Details on Fabian’s v3 patches can be found [here](http://lists.nongnu.org/archive/html/qemu-devel/2014-06/msg02558.html). + +In the meantime, Edgar was able to get his first Armv8-A EL2/EL3 patchset approved and committed upstream. This was shortly followed by a second patchset enabling certain aspects of the Armv8-A EL2/EL3 exception model. Details on Edgar’s approved patches can be found [here](http://lists.nongnu.org/archive/html/qemu-devel/2014-05/msg05035.html). + +#### Version 4 & 5 - Linaro’s contributions + +Today, development is ongoing, with Linaro awaiting review comments on version 4 of the original patchset. The patchset primarily consists of fixes for feedback on the version 3 patches. Not far behind, version 5 is underway and includes minor fixes discovered in testing and will address version 4 feedback. It is targeted at being the upstream version. Details on Linaro’s v4 patches up for review can be found [here](http://lists.nongnu.org/archive/html/qemu-devel/2014-06/msg07347.html). + +As well, Edgar’s development is still underway as he is wrapping up his second set of Armv8-A EL2/EL3 changes, which are still being monitored and coordinated with Linaro’s changes. Fabian and Sergey have actively been commenting on the outstanding changes. Details on Edgar’s latest patches can be found [here](http://lists.nongnu.org/archive/html/qemu-devel/2014-08/msg02858.html). + +#### Future updates and ongoing work + +In addition to the above processor security extension development, both Edgar and Fabian have been developing QEMU GIC security extensions functionality. Fabian has submitted his patches to the QEMU working group and Linaro has agreed to take ownership of the patches to see them through. + +In addition to the future GIC work, Linaro will continue to pursue a full QEMU TrustZone solution. + +### TrustZone QEMU availability + +#### Where can I find it? 
+ +The latest QEMU TrustZone support is available in the below Linaro git repository: + +[https://git.linaro.org/virtualization/qemu-tz.git](https://git.linaro.org/virtualization/qemu-tz.git) + +To acquire a buildable version of QEMU: + +```bash + $ git clone https://git.linaro.org/virtualization/qemu-tz.git --branch qemutz +``` + +#### How do I build it? + +To build QEMU (from the QEMU root directory): + +```bash + $ ./configure --target-list=arm-softmmu + $ make +``` + +#### How do I run it? + +In order to take advantage of QEMU’s security extensions, you have to have an image capable of providing secure and non-secure contexts. Without this, it is not possible to take advantage of the TrustZone features. If you are interested in checking whether the TrustZone-enabled QEMU still works, take a stab at booting your favorite Arm 1176 or Cortex-A8/A9/A15 Linux kernel as follows from the QEMU root directory: + +```bash + $ ./arm-softmmu/qemu-system-arm -kernel $PATH_TO_KERNEL/zImage -M vexpress-a15 -cpu cortex-a15 -dtb PATH_TO_DTB/vexpress-v2p-ca15-tc1.dtb -m 1024 -append 'console=ttyAMA0,38400n8' -serial stdio -initrd $PATH_TO_INITRD/initrd.img +``` + +#### How do I run a secure image? + +In order to take advantage of QEMU’s support for the Arm Security Extensions, different command line options are used to start the user off in a secure PL1 mode. As mentioned earlier, the -bios command line option is used to initiate execution of a raw binary image starting at address 0x0 in a secure PL1 mode. This option replaces the standard options used when booting a standalone OS kernel, such as -kernel, -dtb, and -initrd. Support of the -bios option is currently limited to Arm Versatile Express models using Cortex-A9 or A15 processors. + +```bash + $ ./arm-softmmu/qemu-system-arm -bios $PATH_TO_IMAGE/image -M vexpress-a15 -cpu cortex-a15 -m 1024 -append 'console=ttyAMA0,38400n8' -serial stdio +``` + +### References + +\[1] [http://www.arm.com/products/processors/technologies/TrustZone/index.php](http://www.arm.com/products/processors/technologies/trustzone/index.php) + +\[2] [http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.prd29-genc-009492c/ch04s01s01.html](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.prd29-genc-009492c/ch04s01s01.html) + +\[3] [http://wiki.qemu.org/Main\_Page](http://wiki.qemu.org/Main_Page) + +\[4] [https://github.com/jowinter/qemu-TrustZone](https://github.com/jowinter/qemu-trustzone) + +\[5] DDI0406C Arm® Architecture Reference Manual - Armv7-A and Armv7-R edition + +### Author + +**Greg Bellows** and **Christoffer Dall** diff --git a/src/content/blogs/bitmain-joins-96boards.mdx b/src/content/blogs/bitmain-joins-96boards.mdx new file mode 100644 index 0000000..d9a9b1a --- /dev/null +++ b/src/content/blogs/bitmain-joins-96boards.mdx @@ -0,0 +1,42 @@ +--- +title: Bitmain joins Linaro 96Boards Steering Committee, announces Sophon BM1880 + Edge Development Board +date: 2018-11-08T07:00:00.000Z +image: linaro-website/images/blog/96boards-specification-consumer-edition-v2 +tags: + - arm + - open-source +author: linaro +description: Linaro Ltd, announced today that leading ASIC-based cryptocurrency + mining hardware provider Bitmain has joined the 96Boards initiative as a + Steering Committee member and has announced an initial 96Boards product, the + Sophon BM1880 Edge Development Board (EDB).
+related: [] + +--- + +\[Cambridge, UK; 8 November 2018] Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, announced today that leading ASIC-based cryptocurrency mining hardware provider Bitmain has joined the 96Boards initiative as a Steering Committee member and has announced an initial 96Boards product, the Sophon BM1880 Edge Development Board (EDB). + +Bitmain launched the BM1880 edge AI chip in October. This is Bitmain’s first AI chip providing high performance, low power Deep Learning computing power for edge devices. The Bitmain BM1880 enables cutting-edge deep learning techniques, like facial recognition, in real-time on small devices without needing an Internet connection. To help develop a broad range of AI applications, Bitmain also provides a comprehensive toolchain, including compiler and quantization tools, for developers. The Sophon BM1880 EDB is the first product available to developers who wish to develop with the new chip. + +Bitmain began working with 96Boards after identifying the program as an effective vehicle to give developers access to this new technology. 96Boards is Linaro’s initiative to build a single worldwide software and hardware community across low-cost development boards using the latest Arm technology. A large range of products compliant with the 96Boards specifications is available worldwide and this is supplemented with additional hardware functionality provided through standardized mezzanine boards. The 96Boards Steering Committee provides a neutral forum in which companies can cooperate closely to offer semiconductor products in a standard form factor for rapid product development, testing and prototyping. + +“Bitmain has been at the forefront of cryptocurrency and blockchain ASIC development since the company’s founding in 2013, and developed its AI product line from late 2015,” said Darren Tsao, Director of Bitmain Edge AI Product Marketing. “We have chosen to work with 96Boards to make our latest edge AI technology available to developers of artificial intelligence (AI) solutions worldwide in a standard form factor supported by an active community producing innovative real-world solutions.” + +The new Bitmain Sophon BM1880 EDB is the first ASIC-based solution to join the 96Boards.ai line up, which already includes boards bringing the latest CPU, GPU, FPGA, DSP and NPU silicon to developers in a standard form factor. The EDB conforms to the 96Boards CE specification, and key features include dual Arm Cortex-A53 cores, a Bitmain Sophon edge TPU unit offering 1 TOP performance on 8-bit integer operations, USB 3.0 and gigabit Ethernet. Further information about the board is available on the 96Boards.ai website. + +“96Boards has been pushing to extend the choice of latest silicon solutions that are available on boards at affordable prices,” said Yang Zhang, Director of 96Boards. “We’re very excited to add the first ASIC solution to the 96Boards.ai line up and look forward to working with Bitmain and other partners to push this technology into new spaces.” + +### About Bitmain + +Founded in 2013, Bitmain transforms computing by building industry-defining technology in cryptocurrency, blockchain, and artificial intelligence (AI). The company also operates the world’s largest and second largest Bitcoin mining pools in terms of computing power, BTC.com and Antpool.
+ +A leader in the still-nascent, high-growth blockchain technology ecosystem, Bitmain supports a wide array of blockchain platforms and startups, and actively participates in industry and community development. Bitmain has also been active in the AI industry since late 2015, launching its first AI deep learning chip in 2017 under the Sophon brand, followed by a second-generation AI chip with five times the performance. These AI chip products extend capabilities in applications such as machine vision, data centers, supercomputing and robotics. + +Bitmain is headquartered in Beijing with offices throughout China, including Hong Kong, and with global offices worldwide. Visit [https://www.bitmain.com](https://www.bitmain.com) for further information. + +### About Linaro + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/bof-device-tree-secure-firmware-bud17-313.mdx b/src/content/blogs/bof-device-tree-secure-firmware-bud17-313.mdx new file mode 100644 index 0000000..523caa3 --- /dev/null +++ b/src/content/blogs/bof-device-tree-secure-firmware-bud17-313.mdx @@ -0,0 +1,20 @@ +--- +title: Bud17-313 BoF - Device Tree and Secure Firmware +date: 2017-03-17T12:00:00.000Z +author: linaro +description: Device Tree is well established in the Linux kernel. However, there can be other bootloaders and firmware components involved that need to configure the hardware and therefore also need to update the Device Tree blobs before passing them to the Linux kernel. +tags: [] +related: [] + +--- + + + +Device Tree is well established in the Linux kernel. However, there can be other bootloaders and firmware components involved that need to configure the hardware and therefore also need to update the Device Tree blobs before passing them to the Linux kernel. Therefore we are looking for a well-established way for firmware to also make use of and modify the Device Tree blobs before handing them over to the Linux kernel.
With this BoF session we would like to get started and gather ideas. + + + +[BUD17-416: Benchmark and profiling in OP-TEE](https://www.slideshare.net/linaroorg/bud17416-benchmark-and-profiling-in-optee) from [Linaro](http://www.slideshare.net/linaroorg) +**Speakers:** Joakim Bech, Jens Wiklander +**Track:** Security +**Session ID:** BUD17-313 diff --git a/src/content/blogs/ceo-george-grey-opens-linaro-connect-europe-2013-lce13-dublin-ireland.mdx b/src/content/blogs/ceo-george-grey-opens-linaro-connect-europe-2013-lce13-dublin-ireland.mdx new file mode 100644 index 0000000..02cfc73 --- /dev/null +++ b/src/content/blogs/ceo-george-grey-opens-linaro-connect-europe-2013-lce13-dublin-ireland.mdx @@ -0,0 +1,31 @@ +--- +author: linaro +date: 2013-07-08T11:26:43.000Z +description: Industry leaders are gathering in Dublin, Ireland this week to + define the future of Linux on Arm at Linaro Connect +excerpt: Industry leaders are gathering in Dublin, Ireland this week to define + the future of Linux on Arm at Linaro Connect +link: /news/ceo-george-grey-opens-linaro-connect-europe-2013-lce13-dublin-ireland/ +title: CEO George Grey Opens Linaro Connect Europe 2013 (LCE13) in Dublin, Ireland +tags: [] +related: [] + +--- + +DUBLIN, IRELAND - 8 JUL 2013 + +## Industry leaders are gathering in Dublin, Ireland this week to define the future of Linux on Arm at Linaro Connect + +Linaro Connect Europe (LCE13) began today with an opening keynote by George Grey, CEO of Linaro, the not-for-profit engineering organization developing open source software for the Arm® architecture. Over 300 engineers from Linaro, Linaro’s 25 member companies and more than 20 other companies have gathered together to discuss and develop the future of Linux on Arm at the eleventh Linaro Connect. + +Grey explained how the explosion in digital data exchange and usage of a rapidly expanding array of different devices is placing new demands on both hardware and software development. These demands offer opportunities for many companies, but a significant amount of common development work is a ripe target for collaboration, which will enable accelerated innovation and increased differentiation, leading to improved consumer choice. + +“Linaro Connect has grown at each event and I am happy to say this week again promises to be the biggest and most productive yet,” said Grey. “Linaro membership has grown significantly over the last year and we are now not only driving new Linux technology development for mobile devices, but also for servers and networking.” + +Grey also introduced the first of a full lineup of external keynote speakers presenting during the week: Parallels CTO James Bottomley kicked off the keynotes following Grey on Monday 8 July; Wannes De Smet from Sizing Servers will share his review of the first enterprise-class Arm server on Tuesday; Red Hat's Leslie Hawthorn will explain best practices for overcoming challenges to collaboration, and Cole Crawford, COO of the Open Compute Project, will introduce the project on Wednesday; and finally Bob Monkman from Arm will talk about Software Defined Networking on Thursday 11 July. These keynotes are in addition to the regular working group and hacking sessions that discuss the latest challenges facing the ecosystem and propose, develop and test solutions. Jon “maddog” Hall is also joining Linaro Connect to talk about collaborative work on performance tuning of open source software to support new Armv8 64-bit chips.
+ +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 170 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit . diff --git a/src/content/blogs/converting-code-implementing-suspend-blockers.mdx b/src/content/blogs/converting-code-implementing-suspend-blockers.mdx new file mode 100644 index 0000000..7e5be59 --- /dev/null +++ b/src/content/blogs/converting-code-implementing-suspend-blockers.mdx @@ -0,0 +1,118 @@ +--- +keywords: Linaro, Android, Linux, Opensource, Android Kernel, Linux kernel, wake + locks, wakeup sources +title: Converting Code Implementing Suspend Blockers +description: This article discusses the issues of suspend blockers originally + implemented in Android kernel & similar functionality merged upstream in Linux + kernel. +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +author: zoran-markovic +date: 2013-08-26T12:33:07.000Z +tags: + - android + - linux-kernel +link: /blog/android-blog/converting-code-implementing-suspend-blockers/ +related: [] + +--- + +## Abstract + +This article discusses the issues of suspend blockers originally implemented in the Android kernel and similar functionality merged upstream in the Linux kernel, termed “wake locks” and “wakeup sources”, respectively. The author points out the analogy between the two implementations and, in conclusion, proposes an approach for converting older code from using wake locks to use wakeup sources. + +## Background + +In the past, the Android and Linux development communities had different (and sometimes opposing) viewpoints on power management. The fact that the Linux community worked most of the time with devices that were connected to a power source while the Android community worked with battery-powered devices resulted in two strategies in the field of power management: + +* Non-aggressive suspend strategy, where a hardware block can be put into low-power mode or completely powered off if it is not used, and + +* Aggressive suspend strategy, where a hardware block should be powered on only when needed. + +These different viewpoints were best summarized in the white paper by Rafael J. Wysocki [“Technical Background of the Android Suspend Blockers Controversy”](http://lwn.net/images/pdf/suspend_blockers.pdf). + +Most of the issues between the communities have now been laid to rest with the Linux community adopting functionality similar to what was used on Android devices, making it possible to support both strategies in the upstream kernel. + +## Android Implementation + +From the Android perspective, the system stays in suspend state most of the time and should only be awake if absolutely necessary, i.e. if there is at least one system component that remains active. This led to the introduction of wake locks in the Android kernel.
Defined in simple terms, a wake lock is a binary kernel object that is acquired by a subsystem whenever it needs to keep the system awake. The kernel monitors all wake locks and executes a system suspend only when none of the wake locks are held. This behaviour is similar to (or, better said, a “mirror image” of) a counting semaphore, where the semaphore count would correspond to the number of active wake locks: the suspend procedure would wait for the count to become zero before running. If - at any point during the suspend procedure - any of the subsystems requires the system to stay awake, it would acquire its wake lock which would immediately abort the suspend in progress. The latter mechanism is used in particular by wakeup interrupts to prevent racing with a suspend request currently in progress. + +From the kernel side, wake locks were manipulated using the following kernel functions: + +* wake\_lock\_init() - create and initialize a wake lock + +* wake\_lock\_destroy() - delete a wake lock + +* wake\_lock() - acquire the wake lock + +* wake\_unlock() - release the wake lock + +* wake\_lock\_timeout() - acquire a wake lock and release it after timeout expires. + +On Android systems, wake locks could also be manipulated from userland through the /sys/power interface using the following files: + +* /sys/power/wake\_lock - writing a string to this file would create/acquire a wake lock with that name + +* /sys/power/wake\_unlock - writing a string to this file would release a wake lock with that name + +Although disputed, this userland interface to wake locks has now been merged into the upstream kernel and is available with the CONFIG\_PM\_WAKELOCKS configuration option. + +## Linux Implementation + +Linux developers admitted that it is important to have system objects signaling the system to stay awake. They argued that device drivers - rather than userland - would need to have this signaling capability, so the Android approach was gradually adopted in small chunks. + +First, a wakeup\_source object was added to devices’ power management block (struct dev\_pm\_info) to avoid race conditions between wakeup and suspend events. To manipulate the device’s wakeup\_source object, the following kernel functions were added: + +* device\_init\_wakeup() - when called with enable==1, initialize the device’s wakeup\_source; when called with enable==0, disable the device’s wakeup\_source + +* pm\_stay\_awake() - notify the system that a device is processing a wakeup event + +* pm\_relax() - notify the system that a device is no longer processing a wakeup event + +* pm\_wakeup\_event() - notify the system that the device will be processing the wakeup event until timeout + +All of these functions have an argument representing the device’s struct device object, indicating the device to which a wakeup\_source and wakeup event are associated. + +Next, the autosleep (a.k.a. opportunistic suspend) functionality was added to the kernel to automatically trigger a suspend whenever there are no wakeup sources held. The added functionality could be used in conjunction with driver suspend/resume hooks to implement power-saving modes for the system. Along with runtime suspend and auto-suspend features built into device drivers, this power management infrastructure was also meant to be a replacement for the much disputed early suspend/late resume functionality provided in earlier Android kernels.
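+
+For illustration, here is a minimal sketch of how a driver might use the device-centric functions listed above to keep the system awake while it handles a wakeup interrupt. It is not taken from any real driver; the probe function, interrupt handler and the 500 ms timeout are made-up examples:
+
+```c
+/* Hypothetical driver fragment using the device wakeup API described above. */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/pm_wakeup.h>
+
+static int my_probe(struct device *dev)
+{
+	/* Mark the device as wakeup capable and set up its wakeup_source */
+	return device_init_wakeup(dev, 1);
+}
+
+static irqreturn_t my_wake_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+
+	/* Keep the system awake while this wakeup event is processed;
+	 * the wakeup source is released automatically after 500 ms. */
+	pm_wakeup_event(dev, 500);
+	return IRQ_HANDLED;
+}
+```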
+ +Mirroring the original Android wake lock implementation, Linux developers also added kernel functions that manipulate the wakeup\_source object directly: + +* wakeup\_source\_init() - initialize a wakeup source object +* wakeup\_source\_trash() - de-initialize a wakeup source +* \_\_pm\_stay\_awake() - notify the system that a wakeup event is being processed +* \_\_pm\_relax() - notify the system that a wakeup event is no longer being processed +* \_\_pm\_wakeup\_event() - notify the system that a wakeup event will be processed until timeout + +Unlike their device counterparts, these functions have a pointer argument representing the associated wakeup\_source object and could also be used to manipulate wakeup sources that are not associated with any device/driver. + +One can easily notice the following analogy: + +* `struct wake_lock <-> struct wakeup_source` +* `wake_lock_init() <-> wakeup_source_init()` +* `wake_lock_destroy() <-> wakeup_source_trash()` +* `wake_lock() <-> __pm_stay_awake()` +* `wake_unlock() <-> __pm_relax()` +* `wake_lock_timeout() <-> __pm_wakeup_event()` + +## Conclusion + +The above analogy lends itself to a straightforward way of converting code using wake locks to use wakeup sources, in particular: + +1. Replace struct wake\_lock with struct wakeup\_source. +2. Replace instances of wake\_lock\_init() with wakeup\_source\_init(). +3. Replace instances of wake\_lock\_destroy() with wakeup\_source\_trash(). +4. Replace instances of wake\_lock() with \_\_pm\_stay\_awake(). +5. Replace instances of wake\_unlock() with \_\_pm\_relax(). +6. Replace instances of wake\_lock\_timeout() with \_\_pm\_wakeup\_event(). + +The Android alarm-dev driver is one example of how this type of conversion was used in the upstream kernel, as seen in kernel commit [a180c0d659f604568637336a00c0c3ca2f7b094a](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/drivers/staging/android/alarm-dev.c?id=a180c0d659f604568637336a00c0c3ca2f7b094a). + +In cases where a device driver is using a per-device wake lock, a better and more elegant way to convert the code is to use the struct wakeup\_source embedded in the device’s struct dev\_pm\_info field (a short before/after sketch follows at the end of this post): + +1. Remove the instance of struct wake\_lock associated with the device. Newly introduced functions will be using the device object (struct device) pointer argument instead of the pointer to the wake lock object. +2. Replace instances of wake\_lock\_init() with device\_init\_wakeup() with argument enable set to 1. +3. Replace instances of wake\_lock\_destroy() with device\_init\_wakeup() with argument enable set to 0. +4. Replace instances of wake\_lock() with pm\_stay\_awake(). +5. Replace instances of wake\_unlock() with pm\_relax(). +6. Replace instances of wake\_lock\_timeout() with pm\_wakeup\_event(). + +The Android kernel still provides the wake lock interface for compatibility with older drivers. A quick look into the header file include/linux/wakelock.h in the Android kernel, however, reveals that this is now just a wrapper for the wakeup source interface in the upstream kernel. There is no indication of how long this compatibility layer will be maintained. To future-proof their code, driver authors are advised to migrate towards using the wakeup source interface directly.
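+
+As an illustration of the per-device conversion above, here is a minimal before/after sketch. It is not taken from any particular driver; the function names and the surrounding driver are hypothetical, and "dev" stands for the driver’s struct device:
+
+```c
+/* Before: an Android wake lock held while an event is handled
+ * (the lock would have been set up with wake_lock_init() in probe). */
+static struct wake_lock my_lock;
+
+static void my_handle_event_old(void)
+{
+	wake_lock(&my_lock);
+	/* ... process the wakeup event ... */
+	wake_unlock(&my_lock);
+}
+
+/* After: the wakeup_source embedded in dev->power is used instead,
+ * set up with device_init_wakeup(dev, 1) in probe. */
+static void my_handle_event_new(struct device *dev)
+{
+	pm_stay_awake(dev);
+	/* ... process the wakeup event ... */
+	pm_relax(dev);
+}
+```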
diff --git a/src/content/blogs/coresight-perf-and-the-opencsd-library.mdx b/src/content/blogs/coresight-perf-and-the-opencsd-library.mdx new file mode 100644 index 0000000..20f3a7c --- /dev/null +++ b/src/content/blogs/coresight-perf-and-the-opencsd-library.mdx @@ -0,0 +1,266 @@ +--- +excerpt: > + Learn how the CoreSight framework found in the Linux kernel has been + integrated with the standard Perf core, both at the kernel and user space + level. In the latter part the newly introduced Open CoreSight Decoding + Library (OpenCSD) is used to assist with trace decoding. The topic of trace + decoding with openCSD will be covered in an upcoming post. +keywords: CoreSight, Perf, OpenCSD Library +description: Mathieu Poirier looks at how the CoreSight framework, found in + Linux kernel, has been integrated with standard Perf core, both at kernel & + user space level. +image: linaro-website/images/blog/Banner_Linux_Kernel +tags: + - linux-kernel +author: mathieu-poirier +title: CoreSight, Perf and the OpenCSD Library +date: 2016-06-27T21:45:43.000Z +link: /blog/core-dump/coresight-perf-and-the-opencsd-library/ +related: [] + +--- + +![lightbox\_disabled=True Core Dump Banner url=https://wiki-archive.linaro.org/CoreDevelopment](/linaro-website/images/blog/core-dump) + +### **Introduction** + +In this article we explain how the CoreSight framework found in the Linux kernel has been integrated with the standard Perf core, both at the kernel and user space level.  In the latter part the newly introduced Open CoreSight Decoding Library (OpenCSD) is used to assist with trace decoding. The topic of trace decoding with openCSD will be covered in an upcoming post. + +All examples presented in this post have been collected on a juno-R0 platform using code that is [public and accessible to everyone](https://github.com/Linaro/OpenCSD). + +### **Background on Perf and the Performance Management Units** + +The standard Perf core is a performance analysis tool found in the Linux kernel. It comes with a complement user space tool, simply called *perf*, that provides a suite of sub-commands to control and present trace profiling sessions. Perf is most commonly used to access SoC performance counters, but over the years it has grown well beyond that and now covers tracepoints, software performance counters and dynamic probes. + +The perf core is generic and caters to many architectures. To hide variations between HW implementation and profiling metrics the concept of Performance Monitoring Unit (PMU) is used. A PMU is a [structure](https://elixir.bootlin.com/linux/v4.6/source/include/linux/perf_event.h#L223) providing a well defined set of interfaces that PMU drivers implement in order to carry action on behalf of the Perf core. The actions carried out the by the PMU drivers are not relevant to the Perf core itself, as long as the semantic of the API is respected. + +Every time a process is installed on a CPU for execution, the scheduler invokes the Perf core.  From there Perf will see if any event is associated with that process and if so, the PMU API performing HW specific operations is invoked. The same happens when the process is removed from a CPU. That way statistics and performance counters are collected for that process only and aren’t impacted by other activities concurrently happening in the system. Traces collected during a session are transferred to user space using a mmap’ed area and made available to users in the *perf.data* file. 
The latter is then read by the various *perf* [sub-commands](https://perf.wiki.kernel.org/index.php/Main_Page) for rendering in human readable format. + +Integrating the CoreSight drivers with the Perf core was advantageous on many fronts. On the kernel side it streamlined the configuration of trace sessions - with hundreds of parameters per CPU this was certainly not something to pass on. It also offered a way to easily transfer massive amounts of trace data to user space with little overhead. In user space the metadata pertaining to each trace session could be embedded in the *perf.data* file and *perf* sub-commands like *report* and *script* used to decode trace data. Last but not least, most of the upstream code can be re-used in the PMU abstraction. + +### **Integration of CoreSight with the Perf Framework** + +#### The kernel side + +To bridge the gap between the CoreSight framework and the Perf core, CoreSight tracers (ETMv3/4 and PTM) are modelled as PMUs.  At boot time the newly introduced function [*etm\_perf\_init()*](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/drivers/hwtracing/coresight/coresight-etm-perf.c?id=refs/tags/v4.7-rc1#n370) registers an *etm\_pmu* with the perf core: + +`#define CORESIGHT_ETM_PMU_NAME "cs_etm"` + +`static struct pmu etm_pmu;` + +`…` + +```c +static int __init etm_perf_init(void) +{ +         int ret; +  +         etm_pmu.capabilities    = PERF_PMU_CAP_EXCLUSIVE; +  +         etm_pmu.attr_groups     = etm_pmu_attr_groups; +         etm_pmu.task_ctx_nr     = perf_sw_context; +         etm_pmu.read            = etm_event_read; +         etm_pmu.event_init      = etm_event_init; +         etm_pmu.setup_aux       = etm_setup_aux; +         etm_pmu.free_aux        = etm_free_aux; +         etm_pmu.start           = etm_event_start; +         etm_pmu.stop            = etm_event_stop; +         etm_pmu.add             = etm_event_add; +         etm_pmu.del             = etm_event_del; +         etm_pmu.get_drv_configs = etm_get_drv_configs; +         etm_pmu.free_drv_configs = etm_free_drv_configs; +                                 +        ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1); +        if (ret == 0) +                 etm_perf_up = true; +  +         return ret; + } +device_initcall(etm_perf_init); + + +``` + +Calling *perf\_pmu\_register()* creates a new PMU with the characteristics found in the *struct pmu* given as a parameter.  When a successful registration has completed the new PMU can be found alongside the other PMUs catalogued at boot time: + +```bash +linaro@linaro-nano:~$ +linaro@linaro-nano:~$ ls /sys/bus/event_source/devices/ +breakpoint  cs_etm  software  tracepoint +linaro@linaro-nano:~$ +linaro@linaro-nano:~$ ls /sys/bus/event_source/devices/cs_etm +cpu0  cpu1  cpu2  cpu3  cpu4  cpu5  format  perf_event_mux_interval_ms  power  subsystem  type  uevent +linaro@linaro-nano:~$ +``` + +The astute reader will notice that cpu\[0… 5] are not part of the typical sysFS entries associated with PMUs, and they would be correct.  
Upon successful registration with the CoreSight core, the ETMv3/PTM and ETMv4 drivers create a symbolic link between their sysFS entries and the new *cs\_etm* PMU, allowing the Perf user space API to quickly retrieve the metadata associated with a tracer: + +```bash +linaro@linaro-nano:~$ ls -l /sys/bus/event_source/devices/cs_etm/cpu0 +lrwxrwxrwx 1 root root 0 Jun  1 20:19 /sys/bus/event_source/devices/cs_etm/cpu0 -> ../platform/23040000.etm/23040000.etm +linaro@linaro-nano:~$ +linaro@linaro-nano:~$ ls /sys/bus/event_source/devices/cs_etm/cpu0/trcidr/ +trcidr0  trcidr1  trcidr10  trcidr11  trcidr12  trcidr13  trcidr2  trcidr3  trcidr4  trcidr5  trcidr8  trcidr9 +linaro@linaro-nano:~$ +linaro@linaro-nano:~$ ls /sys/bus/event_source/devices/cs_etm/cpu0/mgmt/ +trcauthstatus  trcdevid    trclsr    trcpdcr  trcpidr0  trcpidr2  trctraceid trcconfig      trcdevtype  trcoslsr  trcpdsr  trcpidr1  trcpidr3 +linaro@linaro-nano:~$ +``` + +#### The user space side + +In user space, integration is done around three tools: *perf record*, *perf report* and *perf script*, which are the perf sub-commands we have been referring to.  The first deals with event configuration and creation while the latter two assist in rendering trace data collected during a session in a human readable format. + +##### **perf record** + +Integration in the *perf record* sub-command is done by providing an architecture specific function that returns a [struct auxtrace\_record](https://elixir.bootlin.com/linux/v4.6/source/tools/perf/util/auxtrace.h#L292).  As with the kernel PMU abstraction the auxtrace\_record structure allows the generic core to perform architecture-specific operations without losing genericity.  That way it is possible to process trace data generated by Intel PT and CoreSight without changing anything in the common core. + +```c +struct auxtrace_record *cs_etm_record_init(int *err) +{ +       struct perf_pmu *cs_etm_pmu; +       struct cs_etm_recording *ptr; + +       cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME); + +       [clip…] + +       ptr->cs_etm_pmu                 = cs_etm_pmu; +       ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options; +       ptr->itr.recording_options      = cs_etm_recording_options; +       ptr->itr.info_priv_size         = cs_etm_info_priv_size; +       ptr->itr.info_fill              = cs_etm_info_fill; +       ptr->itr.find_snapshot          = cs_etm_find_snapshot; +       ptr->itr.snapshot_start         = cs_etm_snapshot_start; +       ptr->itr.snapshot_finish        = cs_etm_snapshot_finish; +       ptr->itr.reference              = cs_etm_reference; +       ptr->itr.free                   = cs_etm_recording_free; +       ptr->itr.read_finish            = cs_etm_read_finish; + +       *err = 0; +       return &ptr->itr; +out: +       return NULL; +} + +``` + +Among other things, functions provided to the *struct auxtrace\_record* deal with how to find tracer specific metadata, the presentation and formatting of the metadata in the *perf.data* file along with specifics related to the size and mapping of the ring buffer shared between the kernel and user space.  That ring buffer is then used to retrieve trace data from the kernel. + +##### **perf report and perf script** + +The decompression and rendering of trace data is done in the *report* and *script* utilities.  The process starts by reading the *perf.data* file and parsing each of the events that were generated during a trace session.  
The AUXTRACE\_INFO and PERF\_RECORD\_MMAP2 events are especially important.  The first event carries a wealth of information about how the tracers were configured, the so-called metadata, and a list of offsets in the *perf.data* file where lumps of trace data are located.  These offsets are recorded for later processing. + +PERF\_RECORD\_MMAP2 events carry the name and path of the binary and libraries that were loaded/executed during the trace session.  Those are commonly called *Dynamic Shared Objects*, or DSOs.  Having a handle on the DSOs is important for trace decoding since some branch points don’t carry the destination address, only whether the branch was taken or not.  In those cases the code needs to be read to find out where execution resumed. + +Once all that information has been tallied decoding of the trace data can begin.  The process is done by feeding the previously recorded trace data offsets to the *decoder*.  The *decoder* is an instantiated object provided by the openCSD companion library.  It decodes trace data lumps in steps, calling a user provided callback function with each successful round. + +*Un-synthesised output will look like this:* + +```c +mpoirier@t430:~/work/linaro/coresight/bkk16/jun01-kernel$ ../../kernel-cs-pm/tools/perf/perf report --stdio --dump +. ... CoreSight ETM Trace data: size 162416 bytes + 0: I_ASYNC : Alignment Synchronisation. + 12: I_TRACE_INFO : Trace Info. + 17: I_TRACE_ON : Trace On. + 18: I_ADDR_CTXT_L_64IS0 : Address & Context, Long, 64 bit, IS0.; Addr=0xFFFFFFC000531720; Ctxt: AArch64,EL1, NS; + 28: I_ATOM_F2 : Atom format 2.; NE + 29: I_ADDR_L_64IS0 : Address, Long, 64 bit, IS0.; Addr=0xFFFFFFC000536038; + 39: I_ATOM_F2 : Atom format 2.; EE + 40: I_ADDR_S_IS0 : Address, Short, IS0.; Addr=0xFFFFFFC0005366CC ~[0x166CC] + 43: I_ATOM_F1 : Atom format 1.; E + 44: I_ADDR_S_IS0 : Address, Short, IS0.; Addr=0xFFFFFFC000531BC0 ~[0x11BC0] + 48: I_ATOM_F3 : Atom format 3.; NEE + 49: I_ADDR_S_IS0 : Address, Short, IS0.; Addr=0xFFFFFFC000531F54 ~[0x11F54] + 52: I_ATOM_F1 : Atom format 1.; E + 53: I_ADDR_L_32IS0 : Address, Long, 32 bit, IS0.; Addr=0x0016BB60; + 58: I_ATOM_F3 : Atom format 3.; NEE + 59: I_ATOM_F3 : Atom format 3.; NNE + 60: I_ATOM_F6 : Atom format 6.; EEEEEE + 61: I_ADDR_S_IS0 : Address, Short, IS0.; Addr=0x0016BBF4 ~[0x1F4] + 64: I_ATOM_F1 : Atom format 1.; E + 65: I_ADDR_S_IS0 : Address, Short, IS0.; Addr=0x0016BD44 ~[0xBD44] + 68: I_ATOM_F3 : Atom format 3.; NNE + 69: I_ATOM_F1 : Atom format 1.; E +``` + +This raw trace packet output, ETMv4 in this case, is great for infrastructure debugging but of little value for system troubleshooting scenarios. These packets are further decoded by the OpenCSD library into a set of generic packets, describing core state and instruction ranges executed. The *report* and *script* commands will filter the packets they get back from the decoder and the packets related to executed instruction ranges will be accounted for and submitted for synthesis.  In Perf terminology, the synthesis process deals with how decoded and relevant events are presented to users. + +When using the *report* utility, packets are synthesised to form a flame graph, where hot spots can be identified quickly: + +*mpoirier@t430:\~/work/linaro/coresight/jun01-user$ perf report --stdio* + +```c +# Children      Self  Command  Shared Object     Symbol                 +# ........  ........  .......  ................  ...................... +# +    4.13%     4.13%  uname    libc-2.21.so      [.] 
0x0000000000078758 +    3.74%     3.74%  uname    libc-2.21.so      [.] 0x0000000000078e50 +    2.06%     2.06%  uname    libc-2.21.so      [.] 0x00000000000fcaf4 +    1.65%     1.65%  uname    libc-2.21.so      [.] 0x00000000000fcae4 +    1.59%     1.59%  uname    ld-2.21.so        [.] 0x000000000000a7f4 +    1.50%     1.50%  uname    libc-2.21.so      [.] 0x0000000000078e40 +    1.43%     1.43%  uname    libc-2.21.so      [.] 0x00000000000fcac4 +    1.31%     1.31%  uname    libc-2.21.so      [.] 0x000000000002f0c0 +    1.26%     1.26%  uname    ld-2.21.so        [.] 0x0000000000016888 +    1.24%     1.24%  uname    libc-2.21.so      [.] 0x00000000000fcab8 +    1.19%     1.19%  uname    ld-2.21.so        [.] 0x0000000000008eb8 +    1.18%     1.18%  uname    libc-2.21.so      [.] 0x0000000000078e7c +    1.17%     1.17%  uname    libc-2.21.so      [.] 0x0000000000078778 +    1.08%     1.08%  uname    libc-2.21.so      [.] 0x0000000000078e98 +    1.04%     1.04%  uname    libc-2.21.so      [.] 0x0000000000072520 +    1.04%     1.04%  uname    libc-2.21.so      [.] 0x0000000000078e84 +    0.90%     0.90%  uname    libc-2.21.so      [.] 0x0000000000072368 +    0.86%     0.86%  uname    libc-2.21.so      [.] 0x00000000000fcac8 +    0.83%     0.83%  uname    libc-2.21.so      [.] 0x0000000000071624 +    0.81%     0.81%  uname    ld-2.21.so        [.] 0x00000000000084b4 +    0.80%     0.80%  uname    libc-2.21.so      [.] 0x0000000000074900 +    0.80%     0.80%  uname    libc-2.21.so      [.] 0x00000000000726c0 +    0.79%     0.79%  uname    libc-2.21.so      [.] 0x0000000000078e54 +    0.79%     0.79%  uname    libc-2.21.so      [.] 0x00000000000728d0 +    0.75%     0.75%  uname    libc-2.21.so      [.] 0x0000000000078e74_ +``` + +The above shows that 4.13% of all the instruction ranges started in library libc-2.21.so at address 0x0000000000078758.  Using the source code, the DSO file and an objdump utility it is possible to quickly identify the function that was referenced.  It is important to keep in mind that flame graphs are generated using the *entry* point only.  Nothing can be deduced about the path through the code that was taken after that. + +From more accurate results it is suggested to work with the *script* command where a user supplied script can take advantage of all the information conveyed by synthesised events by way of the [perf\_sample structure](https://elixir.bootlin.com/linux/latest/source/tools/perf/util/event.h#L180).  An example is the *cs-trace-disasm.py* script produced by Linaro: + +```c +FILE: /lib/aarch64-linux-gnu/ld-2.21.so CPU: 0 +         7f9175cd80:   910003e0        mov     x0, sp +         7f9175cd84:   94000d53        bl      7f917602d0 +FILE: /lib/aarch64-linux-gnu/ld-2.21.so CPU: 0 +         7f917602d0:   d11203ff        sub     sp, sp, #0x480 +         7f917602d4:   a9ba7bfd        stp     x29, x30, [sp,#-96]! 
+         7f917602d8:   910003fd        mov     x29, sp +         7f917602dc:   a90363f7        stp     x23, x24, [sp,#48] +         7f917602e0:   9101e3b7        add     x23, x29, #0x78 +         7f917602e4:   a90573fb        stp     x27, x28, [sp,#80] +         7f917602e8:   a90153f3        stp     x19, x20, [sp,#16] +         7f917602ec:   aa0003fb        mov     x27, x0 +         7f917602f0:   910a82e1        add     x1, x23, #0x2a0 +         7f917602f4:   a9025bf5        stp     x21, x22, [sp,#32] +         7f917602f8:   a9046bf9        stp     x25, x26, [sp,#64] +         7f917602fc:   910102e0        add     x0, x23, #0x40 +         7f91760300:   f800841f        str     xzr, [x0],#8 +         7f91760304:   eb01001f        cmp     x0, x1 +         7f91760308:   54ffffc1        b.ne    7f91760300 +FILE: /lib/aarch64-linux-gnu/ld-2.21.so CPU: 0 +         7f91760300:   f800841f        str     xzr, [x0],#8 +         7f91760304:   eb01001f        cmp     x0, x1 +         7f91760308:   54ffffc1        b.ne    7f91760300 +FILE: /lib/aarch64-linux-gnu/ld-2.21.so CPU: 0 +         7f91760300:   f800841f        str     xzr, [x0],#8 +         7f91760304:   eb01001f        cmp     x0, x1 +         7f91760308:   54ffffc1        b.ne    7f91760300 +FILE: /lib/aarch64-linux-gnu/ld-2.21.so CPU: 0 + +``` + +Here we can see exactly the path a processor took through the code.  The first field is the address in the DSO, the second the OPcode as found in the DSO at that specific address while the remaining of the line depicts an assembly language representation of the instructions as provided by objdump.  Instructions on how to setup an environment capable of producing the above output can be found on the [openCSD](https://github.com/Linaro/OpenCSD/blob/master/HOWTO.md) website. + +### **Conclusion** + +In this post we presented the main elements used to integrate the CoreSight framework with the Linux Perf core.  In kernel space CoreSight tracer configuration and control functions are folded in the PMU interface, allowing the Perf core to control trace generation the same way it does with any other system monitoring metrics.  In user space the very valuable metadata, along with trace session blobs, are extracted from the *perf.data* file and submitted to the *decoder* for packet extraction.  Different synthesis methods are offered depending on the level of details needed, i.e the popular flame graph is generated using *perf report* command while more detailed analysis can be rendered by python or perl scripts. + +An upcoming post on this blog will feature the OpenCSD library in detail.  It will introduce the different components, how these are used to decode trace,  and the C++ and C APIs allowing integration with various standalone programs.  The library example and test programs, which demonstrate using the library will also be presented. diff --git a/src/content/blogs/debugging-arm-kernels-using-nmifiq.mdx b/src/content/blogs/debugging-arm-kernels-using-nmifiq.mdx new file mode 100644 index 0000000..6857e76 --- /dev/null +++ b/src/content/blogs/debugging-arm-kernels-using-nmifiq.mdx @@ -0,0 +1,198 @@ +--- +author: daniel-thompson +comments: false +date: 2015-02-08T03:32:23.000Z +description: "Daniel Thompson talks about how Linaro’s work to upstream a + little known tool for Android evolved into an effort, in collaboration with + other contributors, to build a framework to exploit fast interrupt requests + and, as a result, port a wide variety of NMI-based diagnostic techniques to + Arm." 
+excerpt: Daniel Thompson talks about how Linaro’s work to upstream a little + known tool for Android evolved into an effort, in collaboration with other + contributors, to build a framework to exploit fast interrupt requests and, as + a result, port a wide variety of NMI-based diagnostic techniques to Arm. +link: /blog/core-dump/debugging-arm-kernels-using-nmifiq/ +tags: [] +title: Debugging Arm kernels using NMI/FIQ +related: [] + +--- + +# Debugging Arm kernels using NMI/FIQ + +Daniel Thompson talks about how Linaro’s work to upstream a little known tool for Android evolved into an effort, in collaboration with other contributors, to build a framework to exploit fast interrupt requests and, as a result, port a wide variety of NMI-based diagnostic techniques to Arm. + +# Introduction + +For several years Linaro has, alongside several others, been working to reduce the differences between the mainline kernel and the Android (AOSP) kernel. Some of the work has involved taking code from AOSP and modifying it to be suitable for adding to the mainline kernel. On other occasions ideas flow in the other direction and AOSP is able to discard code that has been rendered obsolete by changes to the mainline kernel. This work has been successful to the extent that it is now possible to take an unmodified mainline kernel and boot Android. It will be lacking features and the graphics is not accelerated but nevertheless this is a significant achievement. + +As this work has progressed, the line-of-code delta between mainline and AOSP has dropped significantly. In fact at the last audit one of the most significant contributors towards the line count turned out to be a little known tool for Android called the [FIQ debugger](https://android.googlesource.com/kernel/common.git/+/a82e9f5a7ee65687bda08d70256983fdade2d0d2/arch/arm/common/fiq_debugger.c). + +The Android FIQ debugger is often shipped as part of Google’s Nexus products and is similar in concept to the kdb debugger found in the mainline kernel. Both debuggers allow a developer connected via a serial port to use a simple interactive command interpreter to examine the state of the system. The FIQ debugger has a number of interesting features that did not exist within kdb; these are summarized in an article [describing our early work on the FIQ debugger](http://lwn.net/Articles/600359/). + +There is a significant overlap between the two debuggers, so it did not seem worthwhile trying to upstream the FIQ debugger as a standalone feature; instead we sought to replicate features of the FIQ debugger in kdb.  This blog post will focus exclusively on the FIQ debugger’s signature feature: that it can be triggered by FIQ as well as IRQ. + +A debugger based on FIQ is robust enough to remain functional in circumstances where other on-device debuggers fail. In particular a debugger based on regular interrupts can only be invoked when interrupts are enabled, making it very difficult to debug failures that occur within critical sections when interrupts are masked. + +# An aside: What is FIQ? + +FIQ stands for [Fast Interrupt reQuest](http://en.wikipedia.org/wiki/Fast_interrupt_request) and is a feature found in the majority of Arm cores, including all Armv7-A devices. It augments regular interrupts by providing a second mechanism to asynchronously interrupt the CPU. The two interrupt signals, FIQ and IRQ, can be independently masked and Linux code seldom, if ever, sets the FIQ mask bit.
+ +*Note: On Armv7-A devices that have security extensions (TrustZone), FIQ can only be used by the kernel if it is possible to run Linux in secure mode. It is therefore not possible to exploit FIQ for debugging and run a secure monitor simultaneously. At the end of this blog post we will discuss potential future work to mitigate this problem.* + +FIQ can perhaps best be characterized as a thirty-year-old trick designed to eliminate the need for a DMA unit in certain low cost systems. Avoiding a DMA unit becomes possible because, in addition to the separate masking, the CPU automatically [banks some of its registers](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/ch02s08s01.html) when it switches to FIQ mode. These extra registers make it possible to service FIFO interrupts very quickly and without needing to use the stack. The only (data side) memory accesses needed are those required to fetch and store data from the FIFO. + +Thirty years on, the “fast” features of FIQ remain interesting for a few niche applications, most notably among FPGA developers, but for a debugger based on FIQ we have little interest in anything except the separate mask bit. The separate mask bit allows us to treat FIQ like the non-maskable interrupt (NMI) found on many other architectures (including x86). + +*** + +# Early work + +Our early work focused exclusively on extending code found in Arm's kgdb and kdb support to allow it to be triggered using FIQ. We built just enough infrastructure within the kernel to support this use case and paid little attention to anything beyond getting that single job done. + +The code was fully functional and allowed us to develop a good understanding of the challenges of working with NMIs. Any code that is called from an NMI handler must be carefully audited to make sure it avoids all forms of locking, including spin locks. When we start calling code from NMI for the first time we often have to make it NMI-safe by finding ways to make the code lock-less. For example, we found that several polling serial drivers used spin locks. This was an important discovery since kgdb and kdb poll the UART in order to communicate. + +We regularly shared the resulting patchset (http://thread.gmane.org/gmane.linux.ports.arm.kernel/331027) on the kernel mailing lists. The community feedback arising from these patches convinced us that we needed to raise our sights beyond kgdb and build a foundation to support all of the kernel’s existing NMI-based features. Only by building this foundation would we be able to convince the maintainers that our approach was the correct one. + +# Backtrace on all CPUs + +Most advice on upstreaming includes somewhere within it the idea that the way to build new kernel features is one patch at a time, piece by piece, little by little. In the context of NMI based diagnostics the question we must answer is *“what is the smallest change that can do something useful with an NMI?”* + +Our answer (admittedly supplied to us in a post from Thomas Gleixner (http://thread.gmane.org/gmane.linux.ports.arm.kernel/331027/focus=1778905)) was to implement a function called arch\_trigger\_all\_cpu\_backtrace(). + +All cpu backtrace is called by the spinlock debugging code (CONFIG\_DEBUG\_SPINLOCK) when it thinks the system might have locked up.
It works by sending IPIs (inter-processor interrupts) that raise FIQ on the target processors and, because it uses FIQ, these target processors respond and issue a stack trace even if they are locked up and have interrupts masked. + +Normally on an Arm system, when a deadlock occurs, spinlock debugging will only show the backtrace of the CPU that’s stuck and this might not be the CPU that owns the lock. With all cpu backtrace we get to see much more of the system, hopefully allowing us to find the fault more quickly. For example the following screenshot shows what you would see when the spinlock deadlock detection is triggered on a typical Arm kernel (the functions highlighted were added to intentionally create a lockup warning): + +![Backtrace-on-all-CPUs-1](/linaro-website/images/blog/Backtrace-on-all-CPUs-1) + +Here we can see where we have locked up, but it isn’t clear why. + +With all cpu backtrace enabled we would still get the above information about the CPU that is stuck but we would also be able to scroll down and see this: + +![Backtrace-on-all-CPUs-2](/linaro-website/images/blog/Backtrace-on-all-CPUs-2) + +This better helps us narrow down why the deadlock occurred. + +This patchset is mature and no longer expected to change significantly. Some parts of it, such as the default FIQ handler (handle\_fiq\_as\_nmi), are already upstreamed. The remaining parts that are waiting to be merged include code to initialize the GIC and the Arm architecture specific code that handles the IPI. + +# Hardware performance monitoring + +After completing the previous patch set we stop and ask again *“what is the smallest change that can do something useful?”* This time we turn our attention to the PMU (performance monitoring unit). It is an attractive target because the PMU on modern x86 Linux systems is hooked up to NMI so we can be confident of having a mature sub-system to work with and can expect very few, if any, NMI-related bugs in the generic code. + +The PMU is hooked up to the kernel’s perf events framework and allows us to monitor and profile [CPU behaviour related to performance](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0388f/Bcgddibf.html) including, among many others, CPU cycles consumed, cache misses, and data load/stores. PMU events increment a counter.  For small sections of code the counts can be read before and after the code under test but this may not be practical for larger code bases. For large code bases statistical profiling is often preferred. During statistical profiling each event count is given a high watermark and when that value is reached an interrupt is generated. This allows the PMU to, for example, generate an interrupt every 20 cache misses. Statistics gathered during interrupt handling will quickly identify code that frequently misses the cache allowing it to be optimized. + +The kernel already has drivers for the PMU and they work well but, because they are based on normal interrupts, they do have a subtle limitation. They cannot perform statistical profiling of code that runs with interrupts masked. When we use the FIQ to handle PMU events we are able to profile the entire kernel (except for the PMU management itself) and this allows us to see much more of the system. For example, when we use FIQ to handle PMU events, it is possible for us to profile frequently called interrupt handlers or to identify a heavily contended spin\_lock\_irq(). + +For some workloads the difference can be striking.
The workload for both examples below is the same: dd if=/dev/urandom of=/dev/null. The first screenshot perfectly illustrates the limitation of profiling from a normal interrupt handler: over 90% of the CPU time is spent unlocking interrupts and the cryptographic operations that should dominate the use case are completely hidden. + +![Hardware-performance-monitoring](/linaro-website/images/blog/Hardware-performance-monitoring) + +When we enable the FIQ we immediately get a much deeper insight. Not only can we see the cryptographic operations but we can also see how much impact compiling the kernel with lockdep enabled has on this use case. + +![Hardware-performance-monitoring-2](/linaro-website/images/blog/Hardware-performance-monitoring-2) + +The primary feature introduced by this patchset is to extend the irq sub-system to make it possible to route regular interrupts to FIQ. This change was not required previously because IPIs are architecture specific and do not use the irq sub-system much. Once this feature was added the changes needed to the PMU driver were fairly minor. + +This patch has been published as an RFC and will need further work before it is ready to merge. + +# Enabling the hard lockup detector + +The hard lockup detector is a watchdog built into Linux that uses a periodic NMI in order to detect if the system has become unresponsive. It is used to detect any kind of fault that can cause interrupt handling to fail. Examples include badly matched disables, spurious interrupts, and live locks inside critical sections. + +*Note:* *The hard lockup detector is partnered by the soft lockup detector. The soft lockup detector runs from an interrupt handler and checks for faults that could prevent threads from being scheduled correctly. Interestingly the hard lockup detector doesn’t monitor interrupts directly; instead it monitors the health of the soft lockup detector. If the soft lockup detector fails to run, the hard lockup detector infers that interrupts have failed and reports the fault.* + +The hard lockup detector was selected by the *“what is the smallest change?”* test because it uses the performance monitoring framework to configure the periodic NMI on each processor. Thus the work to enable it is a tiny bit of plumbing and fits into a single patch. + +At present the patch is on [Linaro’s git server](https://git.linaro.org/people/daniel.thompson/linux.git/commit/50316b4218af5b6fbe68a6478613b42258c1b491) but has not been posted on the kernel mailing lists due to its relatively trivial nature, some small issues mentioned in the commit comment and its dependence on other patches that remain at the RFC stage. + +*** + +# The kernel debugger + +Finally we return our attention once more to adding FIQ support for kgdb and kdb. With the infrastructure already in place, and with a pile of NMI-safety fixes already upstreamed as a result of our earlier work, the patch set to add FIQ support comes together in just five patches. + +The bulk of the work is simply the plumbing needed to divert the UART interrupt from IRQ to FIQ. As a result whenever a character appears in the UART’s RX FIFO the FIQ handler runs and uses the polled UART drivers to fish out the character and decide what to do next. Also needed is a small extension to the all-cpu-backtrace IPI so it can also be used to stop all the processors on an SMP system. + +Like the hard lockup patch the kgdb patches are not yet shared on the kernel mailing lists as we are still working hard to upstream their dependencies.
Nevertheless it is fully functional and available via git. + +# HOWTO + +A kernel containing all the NMI/FIQ work can be found here: + +[https://git.linaro.org/people/daniel.thompson/linux.git](https://git.linaro.org/people/daniel.thompson/linux.git) + +The `merge/fiq` branch contains all the features discussed above. Be aware that the branch is frequently rebased; at the time of writing it is based on the v3.19-rc6 kernel. +`ARCH=arm make multi_v7_defconfig` + +```bash +scripts/config \ +  --enable DEBUG_SPINLOCK --enable LOCKUP_DETECTOR \ +  --enable DEBUG_INFO --enable MAGIC_SYSRQ \ +  --enable KGDB --enable KGDB_KDB --enable KGDB_SERIAL_CONSOLE \ +  --enable KGDB_FIQ --enable SERIAL_KGDB_NMI + +ARCH=arm make olddefconfig + +ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- make -j 12 +``` + +*** + +If you don’t have a board capable of running a multi-platform kernel, or your board cannot boot into secure mode, then you might prefer to test using the TrustZone support in qemu. + +Booting the kernel as normal will give you access to all of the features discussed above, with the exception of kgdb. + +Some ideas to try out: + +* `-L` (either by `echo l > /proc/sysrq-trigger` or by sending `-L` via the UART): This will show the stack trace of all CPUs. This should show the CPU requesting the backtrace running \_\_handle\_sysrq and all other CPUs responding by running handle\_fiq\_as\_nmi. + +* perf top: This will show a simple statistical profile based on counting CPU cycles used. Try to run a use-case that you know involves significant interrupt locking in order to see the full benefit (or use the dd example from earlier). + +* cat /proc/interrupts: The NMI field is incremented by the default FIQ handler (handle\_fiq\_as\_nmi) allowing you to quickly check FIQ is working for you. + +* Set the NMI watchdog running (echo 0 > /proc/sys/kernel/nmi\_watchdog; echo 1 > /proc/sys/kernel/nmi\_watchdog) and then write a kernel module to make the kernel lock up (you could also use the one already included in the merge/fiq branch). + +To experiment with kgdb/kdb you will need to modify the kernel command line to enable the NMI-based serial port wrapper. This will vary depending upon your serial port settings but, as an example: + +```bash + console=ttyAMA0,115200 +``` + +Should be changed to: + +```bash + console=ttyNMI0 kgdboc=ttyAMA0,115200 +``` + +With this change the kernel should boot as normal but the serial port will have a wrapper applied so it can be used by the FIQ handler. To trigger kdb you must manually type the gdbserver protocol’s wake-up command `$3#33`. + +# The future + +There are three potential activities related to this work in the future: + +1. All the patches discussed will be maintained both to nurse them until they are delivered to the upstream kernel and to ensure they continue to be supported after they are merged. + +2. Armv8-A and GICv3 introduce a new co-processor interface to the GIC (both for AArch32 and AArch64) that we hope can be exploited to simulate NMIs without using FIQ. This should allow modern Arm devices to benefit from the robustness of NMI debug features without needing to run in secure mode. + +3. OP-TEE and other secure monitors could be extended to allow them to handle some FIQs on behalf of the non-secure OS and route these interrupts back into the non-secure world. This would allow an NMI to be present even where Linux cannot run in secure mode.
+ +From the above list the first two items are being actively pursued by Linaro although our work on Armv8-A is still in the very early stages. + +Right now there are no plans at present to work on the final item, in part this is because it is more or less rendered obsolete by the switch to Armv8-A systems. There also remain some serious technical challenges too. In particular world switching is a relatively expensive operation, making its use for performance monitoring unwise. + +When we started this work our goal was to take a single feature from Android and make it more widely available. The feedback we received from the community challenged us to do more and result is a wide variety of debugging tools, all previously missing on Arm, that have been developed and can potentially be used across the eco-system, from mobile phones to large-scale servers. Interacting with the community in this way is, without doubt, one of the most exciting thing about writing open source software. + +***The community is, of course, made up of individuals and among the many people I have met so far I would especially like to thank Thomas Gleixner, Russell King, John Stultz, Dirk Behme and Will Deacon who variously have helped with code reviews, advice, feedback and encouragement.*** + +## **Correction** + +**In the article, the section "Backtrace on all CPUs", incorrectly implies that all work on all CPU backtrace for Arm was done by Linaro employees. In fact, Russell King provided an *initial prototype implementation* (http://thread.gmane.org/gmane.linux.ports.arm.kernel/353795/) for Arm, derived from the existing x86 implementation. This patch was combined with patches from our own early work and the combined patchset evolved into the work presented in this article.** + +1: Once spin\_lock\_irq() has masked interrupts it becomes invisible to the profiler no matter how long it spends spinning trying to acquire the contended lock. diff --git a/src/content/blogs/demo-friday-linaro-connect-q1-12-show-latest-linux-developments-arm.mdx b/src/content/blogs/demo-friday-linaro-connect-q1-12-show-latest-linux-developments-arm.mdx new file mode 100644 index 0000000..ecdaa7c --- /dev/null +++ b/src/content/blogs/demo-friday-linaro-connect-q1-12-show-latest-linux-developments-arm.mdx @@ -0,0 +1,27 @@ +--- +author: linaro +date: 2012-06-20T11:17:41.000Z +description: CAMBRIDGE, UK - 20 JAN 2012 +link: /news/demo-friday-linaro-connect-q1-12-show-latest-linux-developments-arm/ +title: Demo Friday at Linaro Connect Q1.12 to show latest Linux developments on Arm +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 20 JAN 2012 + +## Linaro members, partners and community offer interactive demonstrations showcasing Arm-processor-based boards and Linaro builds of Android and Ubuntu + +Linaro, a not-for-profit engineering organization consolidating and optimizing open source software for the Arm architecture, today announced Demo Friday, to be held at the Linaro Connect Q1.12 event. Demo Friday is a two-hour event that will showcase new and innovative ways Linaro members, partners and community are using Linaro Builds of Ubuntu and Android on Arm processor-based boards. Demo Friday will take place on Friday, February 10, 2012 from 4pm to 6pm in the Vernada Room of the Sofitel Hotel in Redwood City, California. Demo Friday is open to the public. This is an exciting and educational opportunity for everyone to get a hands on demonstration of what Linaro is enabling with Arm processor-based boards for Linux on Arm. 
+ +Previous demonstration events have included Kinnect-face robot recognizing human faces, Linaro Android Build Service, Automated Validation Infrastructure for Android system, Snowball board demo, Quick Start board demos and more. More about past events can be found on the Linaro wiki ([wiki-archive.linaro.org/Events/2011-05-LDS/Showcase](https://wiki-archive.linaro.org/Events/2011-05-LDS/Showcase)). + +All companies and engineers who are using Linaro on Arm processor-based boards are invited to participate in Demo Friday and highlight their work. All the demonstrations for Demo Friday have not yet been finalized and there are opportunities to add more to the line-up. Anyone interested in this opportunity to participate can email the details of their demonstration to:events@linaro.org. + +## Join us for Linaro Connect Q1.12 + +Linaro Connect is held every three to four months to bring the Linux on Arm community together to learn about the latest SoC developments, plan the next development cycle and hack together. These events give the Linux community an opportunity to be a part of the Linaro team and help to define the Arm tools, Linux kernels and builds of key Linux distributions including Android and Ubuntu on member SoCs. Join us for our next event February 6-10th in San Francisco, CA. + +About Linaro +Linaro is a not-for-profit engineering organization working on consolidating and optimizing open source software for the Arm architecture, including the gcc toolchain, the Linux kernel, Arm power management, graphics and multimedia interfaces. Linaro's key value is in working on generic Arm technology that is common to all Arm SoC vendors. In this way engineering costs are shared, rather than each vendor having to implement core software technology themselves, which has resulted in fragmentation and overhead in maintaining code that cannot be upstreamed to the mainline Linux kernel and other open source projects. Linaro's output is used by its members, and by distributions including Android, Ubuntu and OEM/ODM customized versions of Linux. Linaro's goals are to deliver value to its members through enabling their engineering teams to focus on differentiation and product delivery, and to reduce time to market for OEM/ODMs delivering open source based products using Arm technology. For more information, please visit [www.linaro.org](/) diff --git a/src/content/blogs/energy-aware-scheduling-eas-progress-update.mdx b/src/content/blogs/energy-aware-scheduling-eas-progress-update.mdx new file mode 100644 index 0000000..084b2e0 --- /dev/null +++ b/src/content/blogs/energy-aware-scheduling-eas-progress-update.mdx @@ -0,0 +1,394 @@ +--- +excerpt: Arm and Linaro are jointly developing Energy Aware Scheduling, a + technique that improves power management on Linux by making it more central + and easier to tune. See the latest update. +title: Energy Aware Scheduling (EAS) progress update +description: In this article, Linaro take a detailed look at Energy Aware + Scheduling (EAS) a technique that improves power management on Linux. Read + about it here! +image: linaro-website/images/blog/Banner_Core_Technologies +author: linaro +date: 2015-09-18T18:01:52.000Z +tags: + - arm + - linux-kernel +link: /blog/core-dump/energy-aware-scheduling-eas-progress-update/ +related: [] + +--- + +*Authors:  Ian Rickards (Arm),  Amit Kucheria (Linaro)* + +**Today, power management on Linux is implemented by different subsystems that work in a largely un-coordinated manner. 
This makes platform adaptation difficult and tuning complex. Arm and Linaro are jointly developing "Energy Aware Scheduling", a technique that improves power management on Linux by making it more central and easier to tune.  This will improve mainline Linux support for advanced multicore SoC’s that power current and future mobile devices and other consumer products.** + +The existing Linux ‘Completely Fair Scheduler’ has a throughput based policy.  For example, if you have a new task and an idle cpu, then the scheduler will always put the new task on the idle cpu. However, this may not be the best decision for lowest energy usage.  EAS is designed to implement energy saving without affecting performance. + +The Energy Aware Scheduling project consists of a number of component tasks: + +![EAS task image](/linaro-website/images/blog/EAS-task-image) + +The goal is to introduce generic energy-awareness in upstream Linux: + +1. Using a clean, generic design to support a broad range of CPU topologies. +2. Based on scientific, measured energy model data rather than magic tunables. +3. Providing a high-quality baseline solution that can be used as-is, or extended as needed. +4. Designed-for-mainline => reducing software maintenance costs. + +EAS will unify 3 separate frameworks in the Linux kernel that are currently only loosely connected: + +* Linux scheduler (Completely Fair Scheduler - CFS) +* Linux cpuidle +* Linux cpufreq + +These existing frameworks have their own policy mechanisms that make decisions independently. Our previous blog post covered the limitations of this approach. + +The optimal solution is to fully integrate these functions into the Linux scheduler itself, with sufficient information to enable the most energy-efficient scheduling decisions to be made. + +A typical Arm multi-core SoC would have the following voltage and frequency domains: + +![Arm voltage EAS blog](/linaro-website/images/blog/ARM-voltage-EAS-blog) + +Ideally, each cluster will operate at its own separate independent frequency and voltage.  By lowering the voltage and frequency, there is a substantial power saving.  This allows the per-cluster power/performance to be accurately controlled, and tailored to the workload being executed. + +A generic energy model based approach is expected to support a broad range of current and future CPU topologies, including SMP, multi-cluster SMP (e.g. 8-core Cortex-A53 products), as well as traditional Arm big.LITTLE. + +Since the original discussions started on the Linux Kernel Mailing List in 2013, there has been significant progress recently: + +![AES blog image 3](/linaro-website/images/blog/AES-blog-image-3) + +## **Scheduler idle-state awareness** + +*Engineer:  Nicolas Pitre, Linaro \[Merged Sep-2014, in Linux 3.18 and later]* + +The sched-idle enhancement makes the scheduler aware of the idle state of the CPU’s.  When waking up a cpu it will now always pick the CPU in shallowest idle-state, minimizing wake-up time and energy. + +In the example below, a new task needs to wake up, but it will not fit on CPU#0 because the current operating point is almost fully utilized.  With sched-idle integrated, the new task always gets placed on CPU #1 since it is in the shallowest idle state (WFI), and the other cluster remains in C2 shutdown.  This is the lowest energy and fastest response option. 
+ +![AES blog image 4](/linaro-website/images/blog/EAS-blog-4) + +## **DVFS (cpufreq) improvements** + +*Current situation with DVFS support in Linux* + +The existing cpufreq implementation is an extension to the Linux kernel, which uses a sampling-based approach to consider cpu time in idle along with some heuristics to control the CPU Operating Performance Point (OPP).  There are a number of disadvantages to this approach: + +1. Sampling based governors are slow to respond and hard to tune. +2. Sampling too fast: OPP changes for small utilization spikes. +3. Sampling too slow: Sudden burst of utilization might not get the necessary OPP change in time - reaction time might be poor. +4. Only aware of the overall CPU loading and is not aware of task migration. + +![AES blog image 5](/linaro-website/images/blog/EAS-blog-5) + +## **New scheduler-driven DVFS (sched-DVFS)** + +*Engineers:  Mike Turquette, Linaro/Baylibre \[latest PATCH v3, June-2015]* + +With scheduler task utilization tracking, a feature that the mainline kernel already supports, any OPP transition required will happen immediately based on the stored tracked load of the task. + +![AES blog image 6](/linaro-website/images/blog/EAS-blog-6) + +With sched-cpufreq, when the new task is placed on CPU#1, the cpu capacity for the little cluster changes immediately.  This uses the history of the task, which is stored internally as part of the CFS scheduler in the kernel.  This is a good approximation for many tasks which have consistent cpu load behavior. + +## **Foundations - Frequency and capacity invariant load tracking** + +*Engineers:  Morten Rasmussen/Dietmar Eggemann, Arm* + +The “Per-Entity Load Tracking” (PELT) framework in the Linux kernel determines the load of a task by looking at the utilization of cpus.  The existing design of PELT tracks the CPU utilization but does not accurately track the load on different CPUs at different frequencies or with different performance per MHz.  Arm has built on the recent July-2015 rewrite of PELT from Yuyang Du to add frequency and microarchitecture support: + +[https://lkml.org/lkml/2015/7/15/159](https://lkml.org/lkml/2015/7/15/159) - PELT rewrite (Yuyang Du, Intel corp.) +[https://lkml.org/lkml/2015/8/14/296](https://lkml.org/lkml/2015/8/14/296) - Frequency and microarchitecture invariance for PELT  (Arm) + +**Capacity** +This is a measure of the processing capability of a cpu.  Arm patches include enhancements for capacity to be extended with additional scaling for microarchitecture and current operating frequency. The cpu capacity at different operating points is based on measuring some standard benchmark metric ,e.g. “sysbench” + +**Utilization** +Traditionally the utilization has been related to the running time.  Arm foundational patches extend this to accommodate the frequency & performance of the cpu. + +*Existing utilization calculation* + +![AES blog image 8](/linaro-website/images/blog/EAS-8) + +*New utilization calculation takes into account frequency and microarchitecture* + +![AES blog image 9](/linaro-website/images/blog/EAS-image-9) + +## **Energy model** + +*Engineer:  Morten Rasmussen, Arm \[latest RFCv5, July-2015]* + +The EAS energy model is the final piece which enables the CFS with energy-aware task scheduling.  It allows the kernel to decide at run-time which scheduling decisions are the best ones for lowest energy usage. The Energy-Aware policy is to always pick the CPU with sufficient spare capacity and smallest energy impact. 
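+
+To make that policy a little more concrete, the sketch below shows, in very simplified form, how an energy estimate can be derived from a table of per-OPP capacity and power values like the one described above. This is illustrative code written for this post, not the RFC patches; all names (struct cap_state, find_cap_state, cpu_busy_energy) are invented, and a real model also accounts for cluster-level and idle-state power:
+
+```c
+/* Illustrative sketch only - not the posted EAS patches.  Each entry mirrors
+ * one row of the energy model table: the compute capacity available at an
+ * OPP and the busy power drawn at that OPP.  Tables are assumed to be
+ * ordered from lowest to highest OPP. */
+struct cap_state {
+	unsigned long cap;	/* compute capacity at this OPP */
+	unsigned long power;	/* busy power at this OPP */
+};
+
+/* Pick the lowest OPP whose capacity can serve the expected utilization. */
+static const struct cap_state *find_cap_state(const struct cap_state *table,
+					       int nr_states,
+					       unsigned long util)
+{
+	int i;
+
+	for (i = 0; i < nr_states - 1; i++)
+		if (table[i].cap >= util)
+			break;
+	return &table[i];
+}
+
+/* Rough busy-energy estimate for one CPU: the fraction of time it must run
+ * at the chosen OPP multiplied by the power drawn at that OPP. */
+static unsigned long cpu_busy_energy(const struct cap_state *cs,
+				     unsigned long util)
+{
+	return util * cs->power / cs->cap;
+}
+```
+
+An estimate of this kind would be computed for each candidate placement, including the OPP changes it forces on the other CPUs in the same frequency domain, and the placement with the smallest energy increase wins.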
+ +This also removes the magic tunables in some of the current power management frameworks - you actually have to look into the code to understand what these magic tunables do.  For example, consider the big.LITTLE HMP thresholds, the scheduler tunables, and even the interactive governor tunables (used in products but never merged to mainline). + +![AES blog image 10](/linaro-website/images/blog/EAS-image-10) + +The platform energy model is an accurate baseline model of the dynamic and static power used by the CPUs in the system. + +*Typical big.LITTLE CPU power/performance curves* + +![AES blog image 11](/linaro-website/images/blog/EAS-image-11) + +For each CPU, the energy model contains the following information: + +![AES blog image 12](/linaro-website/images/blog/EAS-blog-image-12) + +We are discussing the best ways to express this energy model with the open source community. One option being considered is using a Device Tree. + +#### *Options for placing a waking task* + +As seen in the diagram below, a newly waking task can sensibly be placed on either of the two CPUs - CPU#1 or CPU#3.  With the current mainline scheduler, either CPU#1 or CPU#3 could be chosen. + +![AES blog image 14](/linaro-website/images/blog/EAS-image-14) + +EAS considers the energy costs of the two options: + +**CPU#1**: the operating point must be moved up for both CPU#0 and CPU#1 + +**CPU#3**: no operating point change, but higher power used as per the power/performance graph below + +![AES blog image 15](/linaro-website/images/blog/EAS-image-15) + +Based on the above, EAS will probably choose CPU#1 because the small additional energy cost of increasing the OPP of CPU#0 (and CPU#1 by implication - since both CPUs are in the same frequency domain in this example) is not significant compared with the better power efficiency of running the task on CPU#1 instead of CPU#3.  The key foundational piece is understanding the intensity of the task (done by PELT with frequency and microarchitecture invariance). + +EAS doesn’t evaluate all the possible options, as doing so would introduce performance hits in key scheduler paths. Instead, EAS narrows down the search space to: + +* The CPU the task ran on last time. +* The CPU chosen by a simple heuristic which works out where the task fits best. + +Based on the energy model, EAS evaluates which of these two options is the most energy efficient. + +## **SchedTune** + +*Engineer:  Patrick Bellasi, Arm \[posted August-2015]* + +The ‘interactive governor’ appeared on Android in 2010, and it has proved to be a very popular solution for maximizing battery life whilst providing a high operating point suitable for interactive tasks. However, the interactive governor was not merged into the mainline Linux kernel. There is considerable interest in having a frequency boost capability available in mainline Linux as part of cpufreq (and potentially EAS in future). + +There has been repeated demand for a single, simple tunable ‘knob’ that permits the selection of energy-efficient operation at one end and high-performance operation at the other. With sched-DVFS and EAS in place, the stage is set for implementing such a central tunable. Arm’s proposal for this tunable is called SchedTune. + +SchedTune adds an additional ‘margin’ to the tracked load from PELT. Sched-DVFS and EAS then use this ‘boosted’ tracked load when selecting operating points as usual. The magnitude of the margin is controlled by a single user-space facing tunable.
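The shape of such a margin can be illustrated with a short sketch. This is one simple way a boost margin could work, not the actual SchedTune implementation (names are invented for this example; the authoritative definition is the SchedTune posting in the patch table below): the boost percentage inflates the tracked utilization towards the maximum capacity, so small tasks are boosted more in absolute terms than tasks that already nearly fill a CPU.

```c
#include <stdint.h>

#define SCHED_CAPACITY_SCALE 1024	/* capacity of the biggest CPU at its highest OPP */

/*
 * Illustrative boost margin: a boost of 0 leaves the signal untouched,
 * while a boost of 100 makes every task look as if it needs the full
 * capacity of a CPU.
 */
static uint64_t boosted_util(uint64_t util, unsigned int boost_pct)
{
	uint64_t margin = (SCHED_CAPACITY_SCALE - util) * boost_pct / 100;

	return util + margin;	/* fed to sched-DVFS and EAS instead of the raw signal */
}
```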
+ +![AES blog image 16](/linaro-website/images/blog/EAS-image-16) + +If the task appears to be bigger, the allocated MHz from cpufreq/sched-cpufreq will be higher.  Also, on a big.LITTLE system, it is more likely to be placed onto a big CPU. This simple technique permits the selection of a suitable power/performance point that provides the best interactive response for the system. + +## **Tooling & Analysis** + +Arm and Linaro have been working on implementing open source test and analysis tools, most of which needed to be newly developed for the EAS project. + +### rt-app / WorkloadGen (Linaro) + +[https://wiki-archive.linaro.org/WorkingGroups/PowerManagement/Resources/Tools/WorkloadGen](https://wiki-archive.linaro.org/WorkingGroups/PowerManagement/Resources/Tools/WorkloadGen) + +Most existing benchmarks run flat-out, and there are few good existing tools to run lower-intensity use cases. + +rt-app is a Linux command-line tool that creates light-intensity workloads, using JSON files to describe different simulated use cases. rt-app is already used by the scheduler community. + +### workload-automation (Arm) + +[https://github.com/Arm-software/workload-automation](https://github.com/Arm-software/workload-automation) + +This is a Python framework for running standard tests and benchmarks on a target system. It supports: + +* Linux +* Android (browser and standard benchmarks) +* ChromeOS (telemetry benchmarks etc) + +ftrace logs are captured from the Linux kernel, and workload-automation integrates with various power measurement tools, e.g. NI DAQ for measuring device power, and ChromeOS servo boards. + +### TRAPpy (Arm) + +[https://github.com/Arm-software/trappy](https://github.com/Arm-software/trappy) +[https://github.com/Arm-software/bart](https://github.com/Arm-software/bart) + +TRAPpy is a Python-based visualization tool to help analyze ftrace data generated on a device. It depends on IPython notebook and pandas (a Python data analysis library), and can be used from a browser to zoom in to analyse scheduler behaviors. + +One important feature is an API for tracking thread residency behavior, which allows it to be used as the framework for EAS regression testing.  Arm has a tool called “BART” - Behavior Analysis Regression Testing - which uses this API. + +### idlestat (Linaro) + +[https://wiki-archive.linaro.org/WorkingGroups/PowerManagement/Resources/Tools/Idlestat](https://wiki-archive.linaro.org/WorkingGroups/PowerManagement/Resources/Tools/Idlestat) + +Idlestat uses kernel ftrace to monitor and capture C-state and P-state transitions of CPUs over a time interval. Idlestat can also use an energy model for a given platform to help estimate the energy consumption of a given workload. + +Idlestat can be used with sample workloads to capture and compare C-state and P-state behaviours in a reproducible manner across kernel versions. + +### kernelshark (existing) + +[https://www.redhat.com/](https://www.redhat.com/) + +An X11/GTK tool used for analysis of ftrace data; it is useful for detailed scheduler analysis but does not offer the API capability of ‘TRAPpy’ above. + +## **Getting involved with EAS** + +All the work on EAS is done in the open on mailing lists: + +1. Linux Kernel Mailing List (LKML) for patches and EAS architecture discussions + (postings on LKML prefixed with “sched:”) + This is the preferred option as the Linux kernel maintainers will see the questions. +2.
eas-dev mailing lists ([http://lists.linaro.org](https://lists.linaro.org/mailman3/lists/)) + This mailing list is for discussing experimental aspects of EAS development that are too premature for discussion on LKML. + +Arm provides a git repo containing the latest EAS patches applied to a recent Linux kernel (see the repository table below). + +Arm/Linaro are planning an LSK 3.18 backport of EAS (on a separate experimental branch) for availability soon; this will be the best route to Android testing. + +Arm and Linaro appreciate any participation in shaping the future direction of EAS, and we particularly welcome testing on a range of platforms, including ‘tested-by’ comments on LKML. + +### Current patchsets for review

| Description | URL |
| --- | --- |
| Scheduler driven DVFS PATCH v3 | https://lkml.org/lkml/2015/6/26/620 |
| EAS RFCv5 | https://lkml.org/lkml/2015/7/7/754 |
| SchedTune proposal | https://lkml.org/lkml/2015/8/19/419 |
| Foundational patches (frequency and microarchitecture contribution to capacity/utilization, split out from RFCv5) | https://lkml.org/lkml/2015/8/14/296 |
| Yuyang Du PELT rewrite v10 containing Arm enhancements to utilization calculation (already queued for merging) | https://lkml.org/lkml/2015/7/15/159 |

### Future patches under development

| Proposed Patch |
| --- |
| big.LITTLE awareness on wakeup path |
| Further Scheduler driven DVFS enhancements |
| SchedTune extension for EAS |

| Topic | URL |
| --- | --- |
| EAS | http://www.linux-arm.org/git?p=linux-power.git |
| Idlestat | http://git.linaro.org/power/idlestat.git |
| rt-app/workloadgen | https://git.linaro.org/power/rt-app.git |
| TRAPpy | https://github.com/Arm-software/trappy |
| BART | https://github.com/Arm-software/bart |
| Workload Automation | https://github.com/Arm-software/workload-automation |
+ +### Further reading + +LWN article: “Steps toward Power Aware Scheduling”  (25-August-2015) +http://lwn.net/Articles/655479/ + +LWN article: “Teaching the scheduler about power management” (18-June-2014) +http://lwn.net/Articles/602479/ + +LWN article: “Power-aware scheduling meets a line in the sand” (5-June-2013) +http://lwn.net/Articles/552885/ diff --git a/src/content/blogs/first-systems-software-company-acadine-joins-linaro-mobile-group.mdx b/src/content/blogs/first-systems-software-company-acadine-joins-linaro-mobile-group.mdx new file mode 100644 index 0000000..af2b991 --- /dev/null +++ b/src/content/blogs/first-systems-software-company-acadine-joins-linaro-mobile-group.mdx @@ -0,0 +1,41 @@ +--- +excerpt: Linaro announces Acadine Technologies as the first systems software + company to become a member of the Linaro Mobile Group +title: First systems software company Acadine joins Linaro Mobile Group +description: Linaro announces Acadine Technologies as the first systems software + company to become a member of the Linaro Mobile Group +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +author: linaro +date: 2015-10-29T13:03:01.000Z +tags: + - linux-kernel + - open-source +link: /news/first-systems-software-company-acadine-joins-linaro-mobile-group/ +related: [] + +--- + +Cambridge, UK; 29 October 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that Acadine Technologies has become the first systems software company to join the Linaro Mobile Group (LMG). + +LMG was formed in July 2014 to consolidate and optimize open source software for Arm-powered mobile phones, tablets, laptops and wearables. The Group's engineers have focused on the Android Open Source Project (AOSP), performance and power optimizations, graphics and GPGPU, and work with other groups in Linaro Core Engineering on other open source technologies. + +“We are delighted to welcome Acadine Technologies as the first systems software company to become a member of the Linaro Mobile Group”, said Robert Booth, Linaro’s Chief Operating Officer (COO). “Mobile vendors are continually looking for a choice of operating systems and H5OS is a new and very interesting option that provides flexibility and customization for partners. We look forward to working closely with Acadine and the other LMG vendors to ensure the best possible open source mobile software is readily available.” + +Acadine Technologies is a systems software company specializing in innovative mobile operating systems for mobile, wearable, and IoT devices. Acadine’s core product, called H5OS, is a web-centric operating system that is primarily based on HTML5, one of the open standard technologies of the modern Internet. + +“Acadine is committed to make H5OS an innovative open mobile operating system and we’re very excited to join Linaro,” said Dr. Li Gong, Founder, Chairman and CEO of Acadine. “We believe that Linaro’s experience in working with open source projects and the engineering resources that it and its members can apply to developing and optimizing platforms like H5OS will help us make our core product ecosystem-friendly to the community and customers around the world.” + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem.
The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/). + +**About Acadine** +Acadine™ Technologies is a startup company specializing in innovative mobile operating systems for mobile, wearable, and IoT devices. Acadine is based in Hong Kong, with operations in Beijing, Taipei, Palo Alto, and London. For more information, please contact press@xyz.com. + +**About H5OS** +A completely web-centric operating system, H5OS eliminates the native APIs that give the conventional mobile OS owners full business control over other mobile industry participants. As a result, H5OS offers the huge potential of a simpler technical architecture, more efficient memory and power consumption, and completely dynamic and seamless interactions between the mobile operating system, on-device applications, and the millions of web apps and services on the Internet at large. + +The H5OS open architecture also brings about an open mobile ecosystem with open participation, thus enabling choices for everyone, from device makers to operators to consumers, to freely select and experience the best of the web. For more information about our products, please contact press@acadine.com. diff --git a/src/content/blogs/fujitsu-semiconductor-joins-linaro.mdx b/src/content/blogs/fujitsu-semiconductor-joins-linaro.mdx new file mode 100644 index 0000000..b47470d --- /dev/null +++ b/src/content/blogs/fujitsu-semiconductor-joins-linaro.mdx @@ -0,0 +1,30 @@ +--- +author: linaro +date: 2013-07-10T11:27:23.000Z +description: CAMBRIDGE, UK - 10 JUL 2013 +link: /news/fujitsu-semiconductor-joins-linaro/ +title: Fujitsu Semiconductor joins Linaro +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 10 JUL 2013 + +Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, today announced that Fujitsu Semiconductor Limited has joined Linaro to cooperate on new Arm technologies. + +Fujitsu Semiconductor will contribute resources to work together with the resources from existing Linaro members. This shared team of over 100 engineers is directed by a [Technical Steering Committee (TSC)](/about/team/), which now includes Fujitsu Semiconductor. + +Through the TSC, Linaro members are defining the future of Linux on Arm. Accelerated time to market for new Arm technologies is achieved by working together within Linaro on shared solutions, while focusing members' in-house engineering resources on differentiation. + +Linaro uses a unique business model where multiple companies create core open source software once with a shared investment in a single software engineering team, rather than by creating multiple, fragmented software solutions in isolation. 
Membership delivers an immediate return on investment as new members get immediate access to a significant engineering team. + +"We are pleased to join Linaro as a [Club Member](/membership/), and Linaro Linux Kernel which supports Arm big.LITTLE technology enables the development of powerful, yet low power consumption SoC for a broad range of embedded applications" said Mitsugu Naito, executive vice president, Advanced Products BU, Fujitsu Semiconductor Limited. "Our SoC development expertise coupled with Linaro Linux Kernel works on Arm big.LITTLE processing technology will provide the market with the combined performance and low power required for the next generation of innovative embedded products." + +"We are very pleased to welcome Fujitsu Semiconductor as a Linaro member," said George Grey, Linaro CEO. "With Linaro continuing to enable shared member investment on consolidation and optimization of Linux and Android on Arm, as well as working on new Arm technologies such as big.LITTLE and next-generation 64-bit devices, we are excited to have Fujitsu Semiconductor working closely with us." + +About Linaro + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 140 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit . diff --git a/src/content/blogs/google-atap-joins-linaro-mobile-group-to-extend-collaboration-in-project-ara.mdx b/src/content/blogs/google-atap-joins-linaro-mobile-group-to-extend-collaboration-in-project-ara.mdx new file mode 100644 index 0000000..575ae29 --- /dev/null +++ b/src/content/blogs/google-atap-joins-linaro-mobile-group-to-extend-collaboration-in-project-ara.mdx @@ -0,0 +1,31 @@ +--- +author: shovan-sargunam +date: 2015-05-01T11:00:04.000Z +description: Linaro Ltd, the collaborative engineering organization developing + open source software for the Arm® architecture, today added the Advanced + Technologies and Projects (ATAP) Group of Google Inc. as a member of the + Linaro Mobile Group (LMG). +excerpt: Linaro Ltd, the collaborative engineering organization developing open + source software for the Arm® architecture, today added the Advanced + Technologies and Projects (ATAP) Group of Google Inc. as a member of the + Linaro Mobile Group (LMG). +link: /news/google-atap-joins-linaro-mobile-group-to-extend-collaboration-in-project-ara/ +tags: [] +title: Google ATAP joins Linaro Mobile Group to extend collaboration in Project Ara +related: [] + +--- + +## Google ATAP joins Linaro Mobile Group to extend collaboration in Project Ara + +CAMBRIDGE,UK; 1 MAY 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today added the Advanced Technologies and Projects (ATAP) Group of Google Inc. as a member of the Linaro Mobile Group ([LMG](/membership/#lmg)). 
+ +LMG was formed in July 2014 to consolidate and optimize open source software for mobile platforms on Arm. Linaro began working with the Google ATAP group on Project Ara in June 2014. The work has been focused on extending Android for Project Ara, including support for MIPI UniPro and plug-and-play recognition of all types of modules, from batteries and displays to cameras and medical sensors. + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies collaborate with Linaro’s own engineering team to define the future of open source on Arm. The company’s engineering organization comprises over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: its goal is to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. For more information about Linaro, visit [](/). diff --git a/src/content/blogs/google-becomes-club-member-linaro.mdx b/src/content/blogs/google-becomes-club-member-linaro.mdx new file mode 100644 index 0000000..3d0ab09 --- /dev/null +++ b/src/content/blogs/google-becomes-club-member-linaro.mdx @@ -0,0 +1,40 @@ +--- +excerpt: Linaro Ltd, the open source collaborative engineering organization + developing software for the Arm® ecosystem, today announced that Google Inc. + has joined Linaro as a Club member. +title: Google Becomes Club Member of Linaro +description: Linaro Ltd, the open source collaborative engineering organization + developing software for the Arm® ecosystem, today announced that Google Inc. + has joined Linaro as a Club member. +image: linaro-website/images/blog/Client_Devices_banner_pic +author: linaro +date: 2017-03-06T12:00:37.000Z +tags: + - linaro-connect + - linux-kernel +link: /news/google-becomes-club-member-linaro/ +related: [] + +--- + +Budapest, Hungary; 6 March 2017 + +Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, today announced that Google Inc. has joined Linaro as a Club member. + + + +Google’s membership of Linaro demonstrates its strong support for open source collaboration, which it plans to leverage working together with Linaro and its member companies. Google develops Android to run on multiple architectures and the Linaro engagement is in particular aimed at collaboration with Arm based system-on-chip (SoC) partners. Android Open Source Project (AOSP) has been a key part of Linaro’s work since its founding in 2010, and Linaro’s AOSP contributions have now spanned 24 kernel versions from Linux kernel 2.6.36 in Android Honeycomb (3.0) to Linux 4.10 today. + +“Google joining as a Club Member is a significant milestone for Linaro”, said George Grey, Linaro CEO. “We look forward to working with Google, our Arm based SoC partner members and handset vendors on collaborative engineering in the AOSP to accelerate the delivery of new features to the mobile marketplace”. + +Linaro now has over 35 member companies working together to accelerate open source software development. 
As the range and capabilities of SoCs have grown exponentially, the benefits to be gained from collaboration on common open source software across the industry increase. Linaro’s goals are to enable more rapid innovation in the industry through using shared resources to engineer common software elements, enabling each member to focus more of their own resources on product differentiation. + +**About Google and Linux** + +Google has a proven track record in the Linux and the Open Source Community. It is one of the top 10 contributors to the Linux Kernel today, and employs many kernel developers and open source leaders to work directly on Linux and associated open source projects. As well as code contribution, the company sponsors many open source and Linux-related events, partly through its membership of the Linux Foundation. Google also runs an extensive range of programs that encourage students to get involved with open source. + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/hisilicon-joins-linaro-as-core-member.mdx b/src/content/blogs/hisilicon-joins-linaro-as-core-member.mdx new file mode 100644 index 0000000..d8e3616 --- /dev/null +++ b/src/content/blogs/hisilicon-joins-linaro-as-core-member.mdx @@ -0,0 +1,32 @@ +--- +author: linaro +date: 2012-10-29T12:21:01.000Z +description: SHENZHEN, CHINA AND CAMBRIDGE, UK - 29 OCT 2012 +link: /news/hisilicon-joins-linaro-as-core-member/ +title: HiSilicon Joins Linaro as Core Member +tags: [] +related: [] + +--- + +SHENZHEN, CHINA AND CAMBRIDGE, UK - 29 OCT 2012 + +Wireless communication chipset solutions provider HiSilicon Technologies Co., Ltd., and Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, today announced that HiSilicon has joined Linaro as a core member. + +HiSilicon will appoint a representative to the board of Linaro and work with other members to develop the future of Linux on Arm. The company will contribute resources to work together with the engineers from other Linaro members. In addition to joining the board of Linaro, HiSilicon will join the Technical Steering Committee (TSC), which directs the shared Linaro engineering team of over 100 engineers. + +Linaro has a unique business model where multiple companies create core open source software once with a shared investment in a single software engineering team, rather than by creating multiple, fragmented software solutions in isolation. 
Membership delivers an immediate return on investment as new members get immediate access to a significant engineering team. + +“We recognize the importance of working together with other companies to efficiently generate shared solutions that provide a platform for innovation, increased differentiation and faster time to market,” said Teresa He, President of HiSilicon. “HiSilicon has long recognized the importance of software development and Linaro presents a unique opportunity for us to share both our own experience and the experience of the other Linaro members to develop highly competitive, innovative products.” + +"We are extremely pleased to welcome HiSilicon as a Linaro member," said George Grey, Linaro CEO. "HiSilicon is a leading chipset provider in many sectors with the need to deploy open source software in more and more products, from telecom networks to consumer electronics. We look forward to adding HiSilicon software engineers to our global team working on the delivery of core Linux software for the Arm architecture." + +About HiSilicon + +HiSilicon Technologies Co., Ltd. was established in October 2004. Headquartered in Shenzhen, China, HiSilicon has set up design divisions in Beijing, Shanghai, Silicon Valley (USA) and Sweden. The company is a leading chipset solution provider for telecom network, wireless terminal and digital media with the advantage of providing end-to-end chipsets and solutions from telecom network to consumer electronics. It has been serving more than 200 global operators in over 100 countries and will continue to bring maximum value to global operators and consumers. For more information, please visit http://www.hisilicon.com. + +About Linaro + +Linaro is the place where engineers from the world's leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 120 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro's work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro's engineering work is open to all online. To find out more, please visit diff --git a/src/content/blogs/hoperun-joins-linaro-96boards-steering-committee.mdx b/src/content/blogs/hoperun-joins-linaro-96boards-steering-committee.mdx new file mode 100644 index 0000000..f3379b6 --- /dev/null +++ b/src/content/blogs/hoperun-joins-linaro-96boards-steering-committee.mdx @@ -0,0 +1,45 @@ +--- +excerpt: Jiangsu HopeRun Software has joined the 96Boards initiative as a + Steering Committee member. This new collaboration allows HopeRun to cooperate + with 96Boards Manufacturing Partners and SoC vendors to market products under + the 96Boards brand and influence the development of the 96Boards + specifications and initiative. +title: HopeRun joins Linaro 96Boards Steering Committee +description: Jiangsu HopeRun Software has joined the 96Boards initiative as a + Steering Committee member. This new collaboration allows HopeRun to cooperate + with 96Boards Manufacturing Partners and SoC vendors to market products under + the 96Boards brand and influence the development of the 96Boards + specifications and initiative.
+image: linaro-website/images/blog/BKK19-150 +author: linaro +date: 2016-09-22T10:57:54.000Z +tags: + - linaro-connect + - linux-kernel + - open-source +link: /news/hoperun-joins-linaro-96boards-steering-committee/ +related: [] + +--- + +Cambridge, UK: 22 September 2016 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, announced today that Jiangsu HopeRun Software has joined the 96Boards initiative as a Steering Committee member. This new collaboration allows HopeRun to cooperate with 96Boards Manufacturing Partners and SoC vendors to market products under the 96Boards brand and influence the development of the 96Boards specifications and initiative. + +96Boards is Linaro’s initiative to build a single software and hardware community across low-cost development boards based on Arm technology. HopeRun provides software solutions and services internationally with a focus on offshore software outsourcing, mobile phone software and GIS systems. HopeRun will be able to leverage 96Boards to provide its services across a broader range of SoC solutions and provide consistent support that benefits from the platform standardization provided by the initiative. + +“96Boards has already brought us opportunities to explore new projects with potential partners” said ZhongYi, General Manager of HopeRun Smart Terminal BG. “We will be attending Linaro Connect LAS16 to meet with the other 96Boards members and Linaro community and we’re looking forward to working on our own 96Boards platforms and expanding our customer base.” + +The 96Boards steering committee now includes more than ten companies who are working together on Consumer, Enterprise, Digital Home, Networking and IoT specifications. To date, the Consumer, Enterprise and TV Platform Edition specifications have been released with Consumer and TV Platform edition boards readily available and the first Enterprise and IoT edition boards under development. In total, there are now over twenty boards being sold or under development with a large number of mezzanine products and other accessories being released. + +“HopeRun joining as a Steering Committee member brings additional software expertise into an already strong 96Boards steering committee,” said Yang Zhang, Director of 96Boards.  “HopeRun has established a reputation for producing reliable software solutions for a range of global customers and we’re excited to work with them to bring their experience into the 96Boards initiative.” + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [http://www.96Boards.org](https://www.96boards.org/). 
+ +**About HopeRun** +Jiangsu HopeRun Software Co., Ltd is a leading integrated software solution and services provider headquartered in China with global offices. Founded in 2006, HopeRun today has 5,000 employees and is a member of the Information Technology Service Standards (ITSS) Working Group run by China’s MIIT (Ministry of industry and information technology). The company was recently ranked in the top 20 of companies in China with great potential by Forbes and listed as one of the Forbes China “Up-and-Comers” companies. + +HopeRun provides an end-to-end software service, from initial consultation through to planning, design, development, testing, operation and maintenance. It specialises in telematics, device to device communications solutions, in-vehicle systems, financial informatization, intelligent terminal embedded software, supply chain management software and smart grid informatization software. In addition, the company offers data warehousing, analysis, and migration and integration services. For further information, please visit [HopeRun.net](http://www.hoperun.net) diff --git a/src/content/blogs/hxt-semiconductor-joins-linaro-accelerate-advanced-server-development-arm.mdx b/src/content/blogs/hxt-semiconductor-joins-linaro-accelerate-advanced-server-development-arm.mdx new file mode 100644 index 0000000..4aafae5 --- /dev/null +++ b/src/content/blogs/hxt-semiconductor-joins-linaro-accelerate-advanced-server-development-arm.mdx @@ -0,0 +1,35 @@ +--- +title: HXT Semiconductor Joins Linaro to Accelerate Advanced Server Development on Arm +description: Budapest, Hungary; 6 March 2017 +image: linaro-website/images/blog/code +tags: + - arm + - linaro-connect +author: linaro +date: 2017-03-06T07:57:57.000Z +link: /news/hxt-semiconductor-joins-linaro-accelerate-advanced-server-development-arm/ +related: [] + +--- + +Budapest, Hungary; 6 March 2017 + +Linaro Limited, the open source collaborative engineering organization developing software for the Arm® ecosystem, today announced that Guizhou Huaxintong Semiconductor Technology Co., Ltd (HXT Semiconductor) has joined Linaro as a member of the Linaro Enterprise Group (LEG). + +“We’re very pleased to welcome HXT Semiconductor as a LEG member and we look forward to helping them accelerate the deployment of Arm based server solutions into China's cloud computing and data center industries,” said George Grey, Linaro CEO. “Linaro and Arm are leading the effective collaboration of worldwide leaders in the Arm ecosystem across time-zone, cultural and language barriers. We will work with HXT Semiconductor and the enterprise distribution, other system-on-chip (SoC), and data center equipment vendors in LEG, on software that ensures interoperability, innovation and choice for Arm-based data center class products for all markets.” + +HXT Semiconductor is a joint venture between China's Guizhou province and Qualcomm. The venture is registered in Guizhou province, the first region to build an industrial cluster for big data development in China. The area is already home to a data center cluster of more than 2.5 million servers for companies including China Mobile, China Telecom and China Unicom. + +“We decided to license the Arm architecture because it is ideal for Internet services and big data processing in the data center,” said Dr. Kai Wang, CEO of HXT Semiconductor. 
“We have now joined Linaro to work together with other members of the Arm-based server ecosystem on common software engineering challenges so that we can focus our own internal resources on product differentiation in the Chinese chip market. As a natural extension, designed and highly optimized from Arm architecture, HXT server SoC will accelerate advanced server chipset technologies in the rapidly expanding Chinese server market in areas such as Datacenter, Cloud, HPC and Storage.” + +LEG was established in November 2012 as the first vertical segment group within Linaro. The group was formed to accelerate Arm server ecosystem development and it extended the list of Linaro members beyond Arm silicon vendors to Server OEM’s and commercial Linux providers. LEG now has 12 member companies and over 50 engineers. As the range and capabilities of SoCs have grown, the benefits to be gained from collaboration on common open source software across the industry increase. Linaro’s goals are to enable more rapid innovation in the industry through using shared resources to engineer common software elements, enabling each member to focus more of their own resources on product differentiation. + +**About Guizhou Huaxintong Semiconductor Technologies Co., Ltd. (HXT Semiconductor)** + +Guizhou Huaxintong Semiconductor Technologies Co., Ltd is a joint venture between the People’s Government of Guizhou Province and Qualcomm, registered at Gui’An New Area, Guizhou Province, with operations and R\&D center in Beijing and Shanghai. HXT Semiconductor specializes in designing, developing, and selling sophisticated server chips that will fulfill the needs of enterprises in China. Its establishment will contribute to the growth of China’s IC industry and improve the design and development capacities of China’s chips industry. In this way it will help China to achieve the dream of strong chips. + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company is headquartered in the UK with over 300 engineers working in 26 countries on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/). 
diff --git a/src/content/blogs/industry-leaders-collaborate-to-accelerate-software-ecosystem-for-arm-servers-and-join-linaro.mdx b/src/content/blogs/industry-leaders-collaborate-to-accelerate-software-ecosystem-for-arm-servers-and-join-linaro.mdx new file mode 100644 index 0000000..d3607a1 --- /dev/null +++ b/src/content/blogs/industry-leaders-collaborate-to-accelerate-software-ecosystem-for-arm-servers-and-join-linaro.mdx @@ -0,0 +1,129 @@ +--- +excerpt: "AMD, AppliedMicro, Calxeda, Canonical, Cavium, Facebook, HP, Marvell + and Red Hat join existing Linaro members Arm, HiSilicon, Samsung and + ST-Ericsson to form new group focused on accelerating Linux development for + Arm servers " +title: Industry Leaders Collaborate to Accelerate Software Ecosystem for Arm + Servers, and Join Linaro +description: AMD, AppliedMicro, Calxeda, Canonical, Cavium, Facebook, HP, + Marvell and Red Hat join existing Linaro members Arm, HiSilicon, Samsung and + ST-Ericsson to form new group focused on accelerating Linux development for + Arm servers +image: linaro-website/images/blog/linaro-logo +author: linaro +date: 2012-11-01T12:21:45.000Z +tags: [] +link: /news/industry-leaders-collaborate-to-accelerate-software-ecosystem-for-arm-servers-and-join-linaro/ +categories: + - news +related: [] + +--- + +SANTA CLARA, US - 1 NOV 2012 + +## In summary + +AMD, AppliedMicro, Calxeda, Canonical, Cavium, Facebook, HP, Marvell and Red Hat join existing Linaro members Arm, HiSilicon, Samsung and ST-Ericsson to form new group focused on accelerating Linux development for Arm servers + +Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, announced today the formation of the Linaro Enterprise Group (LEG) and the addition of AMD, Applied Micro Circuits Corporation, Calxeda, Canonical, Cavium, Facebook, HP, Marvell and Red Hat as Linaro members. + +With significant market interest in energy-efficient Arm-based servers, industry leaders have joined together through Linaro, creating LEG, to collaborate and accelerate the development of foundational software for Arm Server Linux. LEG benefits have broad industry implications, including time to market acceleration, lower development costs, and access to innovative and differentiated systems, fundamental to the Arm ecosystem. + +The new LEG members have joined existing Linaro members Arm, Samsung and ST-Ericsson to create a shared software engineering team and steering committee. The team will build on Linaro’s experience of bringing competing companies together to work on common solutions and enable OEMs, commercial Linux providers and System on a Chip (SoC) vendors to collaborate in a neutral environment on the development and optimization of the core software needed by the rapidly emerging market for low-power hyperscale servers. + +Speaking at Arm’s largest annual developer conference, TechCon, Arm CEO Warren East said “The significance of key industry players coming together like this to develop new aspects of the ecosystem is showing the transformational position the industry is now in. As power and energy become increasing costs to business, there continues to be a need to drive down costs and this means a total reinvention of the server space. There will be a range of server solutions based on Arm technology as the entire business community looks to reduce cost of ownership and achieve energy efficiency. 
Ultimately, it is the partnership approach which is vital to encourage innovation in this space and we are delighted to see LEG shares this vision. By changing the way we process data, the opportunity for a smarter, more connected future can be truly realized.” Concurrently, at the Linaro Connect event in Copenhagen, the LEG engineering team is engaged in face-to-face meetings as part of Linaro’s regular engineering conference at which over 300 engineers from more than 80 companies have been defining the future of Linux on Arm. + +“Linaro is building a high-quality software engineering team that is working with our members on the development of key enabling software for the new generation of low-power, high-performance, hyperscale servers,” said George Grey, CEO of Linaro. “We are especially pleased with the broad industry support and to be working with commercial Linux providers and OEMs in addition to SoC vendors to ensure that we meet the requirements of all members of the ecosystem.” + +Linaro uses a unique business model where multiple companies jointly invest in a software engineering team that creates core open source software in a collaborative and transparent environment. The effectiveness of Linaro’s approach has been demonstrated by Linaro becoming the third-largest company contributor to the Linux 3.5 kernel\*. Linaro’s contribution to improving Arm’s support in the open source Linux community has recently been recognized by Linus Torvalds\*\*. + +“Linux is driving innovation in every area of computing from mobile and embedded to the cloud. Linaro’s enterprise efforts will bring together software engineers to help accelerate Linux development for Arm servers, and we’re confident that this new server-focused group will advance Linux in these areas and offer additional choices to Linux users around the world,” said Jim Zemlin, executive director of the Linux Foundation. + +Arm servers are expected to be initially adopted in hyperscale computing environments, especially in large web farms and clusters, where flexible scaling, energy efficiency and an optimal footprint are key design requirements. The Linaro Enterprise Group will initially work on low-level Linux boot architecture and kernel software for use by SoC vendors, commercial Linux providers and OEMs in delivering the next generation of low-power Arm-based 32- and 64-bit servers. Linaro expects initial software delivery before the end of 2012 with ongoing releases thereafter. + +\*Source: Who wrote 3.5? Greg Kroah-Hartman, LWN, 25 July 2012: [https://lwn.net/Articles/507986/](https://lwn.net/Articles/507986/) (subscription required) + +\*\*Source: Torvalds touts Linux’s advances in power, Arm and cell phones, Paula Rooney, ZDNet, 30 August 2012 + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 120 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online.
To find out more, please visit . + +#### Linaro Enterprise Group (LEG) Founding Member Testimonials + +**AMD** + +“AMD knows from experience with AMD64 and now with the Heterogeneous Systems Architecture Foundation that an open software ecosystem is critical for enabling a disruptive play with 64-bit Arm servers,” said Leendert van Doorn, Corporate Fellow, AMD. “The Linaro Enterprise Group enables the industry to work cooperatively developing a software ecosystem for Arm servers. By joining forces we eliminate unnecessary ecosystem fragmentation and instead focus on unique innovations that truly benefit our customers.” + +*About AMD:* AMD is a semiconductor design innovator leading the next era of vivid digital experiences with its ground-breaking AMD Accelerated Processing Units (APUs) that power a wide range of computing devices. AMD’s server computing products are focused on driving industry-leading cloud computing and virtualization environments. AMD’s superior graphics technologies are found in a variety of solutions ranging from game consoles, PCs to supercomputers. For more information, visit[http://www.amd.com](http://www.amd.com/). + +**Applied Micro Circuits Corporation** + +“By coming together with other leading hardware and software companies involved in developing Arm server-based solutions, AppliedMicro reinforces its commitment to driving down the TCO associated with deploying next-generation cloud and enterprise infrastructures,” said Vinay Ravuri, general manager, X-Gene, AppliedMicro. “As the first company to deploy an Arm 64-bit Server on a Chip platform, we understand the importance of defining the future of Arm Server Linux and pioneering the ecosystem through our LEG membership.” + +*About Applied Micro Circuits Corporation:* Applied Micro Circuits Corporation is a global leader in energy conscious computing solutions for public and enterprise clouds, telco, enterprise embedded applications, data center, consumer and SMB applications. With a heritage of innovation in high-speed connectivity and high-performance computing, AppliedMicro delivers silicon solutions that dramatically lower total cost of ownership for service provider and data center infrastructures. AppliedMicro’s corporate headquarters are located in Sunnyvale, California. Sales and engineering offices are located throughout the world. For further information regarding AppliedMicro, visit the company’s Web site at [http://www.apm.com](http://www.apm.com/), Media contact: Diane Orr, +1 408-358-1617, diane@orr-co.com. + +**Arm** + +“Linaro offers a perfect example of the Arm ecosystem in action: Leading companies are partnering to meet the demand for energy efficient, high performance servers that benefit everyone from developers to businesses and consumers as the planet’s data levels soar. The inclusive and innovative Arm partnership and Arm Connected Community business models built across mobile devices are now coming to the server space. Linaro is open source and is now opening new doors for all kinds of business”, said Ian Drew Arm Executive Vice President, Marketing, Arm. + +*About Arm:* Arm designs the technology that lies at the heart of advanced digital products, from wireless, networking and consumer entertainment solutions to imaging, automotive, security and storage devices. 
Arm’s comprehensive product offering includes 32-bit RISC microprocessors, graphics processors, video engines, enabling software, cell libraries, embedded memories, high-speed connectivity products, peripherals and development tools. Combined with comprehensive design services, training, support and maintenance, and the company’s broad Partner community, they provide a total system solution that offers a fast, reliable path to market for leading electronics companies. See [www.arm.com](http://www.arm.com/) for more information. Media contact: Andy Phillips, Arm PR. andy.phillips@arm.com; +44 7771975925 + +**Calxeda** + +“Arm has the potential to take significant market share with a range of products from different Arm partners; this is a movie we have all seen before playing out in the mobile space,” said Barry Evans, CEO and founder of Calxeda. “But unlike the mobile space, the data center requires consistent interfaces to allow a single kernel to support everyone’s hardware. LEG is exactly what is needed to bring the industry together to ensure software compatibility across the range of innovative Arm server implementations.” + +*About Calxeda:* Founded in January 2008, Calxeda brings new performance density to the datacenter on a very attractive power foot print by leveraging ultra-low power processors as used on mobile phones as a foundation for its revolutionary technology. Calxeda will make it possible for datacenter managers to increase the density of their computer resources while significantly reducing the need for power, space and cooling. See [www.calxeda.com](http://silverlining-systems.com//) for more information. Media contact: Laura Beck, +1 512-786-1098, pr@calxeda.com + +**Canonical** + +“As a long time supporter of Linaro, Canonical is pleased to be a founding member of the Linaro Enterprise Group, working with others to make Arm servers more accessible to a broader market. With the release of Ubuntu 12.04 LTS, Ubuntu has been the only commercially supported OS platform that enables deployment of Arm server technology. Ubuntu Server has enabled developers and enterprises to start building prototypes and implementing Arm-based servers. LEG provides the perfect context for us to build on this leadership and to help accelerate the commercial adoption of hyperscale and Arm servers” said Jane Silber, CEO at Canonical. + +*About Canonical:* Canonical is the commercial sponsor of the Ubuntu project and the leading provider of support services for Ubuntu deployments in the enterprise. Ubuntu is a free, open- source platform for client, server and cloud computing. Since its launch in 2004, it has become a natural choice for users of all kinds, from Fortune 500 companies to hardware makers, content providers, software developers and individual technologists. With developers, support staff and engineering centres all over the world, Canonical is uniquely positioned to help its partners and enterprise customers make the most of Ubuntu. Canonical is a privately held company. For more information, please visit [http://www.ubuntu.com/partners/arm](http://www.ubuntu.com/partners/arm) or[http://www.ubuntu.com/download/arm](http://www.ubuntu.com/download/arm). Media contact: pr@canonical.com + +**Cavium** + +“Cavium’s Project Thunder is designed to deliver a family of highly integrated, multi-core processors that will incorporate full custom cores built from the ground up based on the 64-bit Armv8 instruction set architecture (ISA) into an innovative system-on-chip (SoC) solution. 
Thunder SoCs are designed to redefine features, performance, power and cost metrics for the next generation cloud and datacenter markets,” said Raghib Hussain, CTO and Cofounder of Cavium. “We are committed to supporting Linaro’s LEG effort that will provide a unified and consistent software base for our customers and ecosystem partners. LEG’s software specifications and deliverables will accelerate Arm based server development.” + +*About Cavium:* Cavium is a leading provider of highly integrated semiconductor products that enable intelligent processing in networking, communications and the digital home. Cavium offers a broad portfolio of integrated, software compatible processors ranging in performance from 10 Mbps to over 100 Gbps that enable secure, intelligent functionality in enterprise, data-center, broadband/consumer and access & service provider equipment. Cavium’s processors are supported by ecosystem partners that provide operating systems, tool support, reference designs and other services. Cavium’s principal offices are in San Jose, California with design team locations in California, Massachusetts, India and China. For more information, please visit:[www.cavium.com](https://www.marvell.com/). Media contact: Angel Atondo, +1 408-943-7417 angel.atondo@cavium.com. + +**Facebook** + +“Arm microprocessor architecture has the potential to bring about a fundamental shift in the datacenter industry, enabling new levels of compute and energy efficiency and spurring greater competition in the server CPU market,” said Frank Frankovsky, VP of hardware design and supply chain for Facebook. “But a lot of work must still be done to turn that vision into a reality, and the Linaro Enterprise Group can play a crucial role by ensuring that we have the software ecosystem necessary to support these new Arm-based server solutions. Facebook is happy to join LEG and invest in this ecosystem.” + +*About Facebook:* Founded in 2004, Facebook’s mission is to make the world more open and connected. People use Facebook to stay connected with friends and family, to discover what’s going on in the world, and to share and express what matters to them. More information about Facebook is available at [http://newsroom.fb.com/](http://newsroom.fb.com/), Media contact: press@fb.com. + +**HP** + +“As data center capacity requirements explode, clients look for solutions that will provide extreme energy efficiency, cost savings and reduced datacenter footprint,” said Tim Wesselman, senior director Partner Strategy, Hyperscale Business Group, Industry Standard Servers and Software, HP. “Through collaboration with LEG and its members, HP extends the promise of Project Moonshot and the development of the Arm server software ecosystem to offer a broad range of innovative System On Chip (SoC) designs and Linux server distributions.” + +*About HP:* HP creates new possibilities for technology to have a meaningful impact on people, businesses, governments and society. The world’s largest technology company, HP brings together a portfolio that spans printing, personal computing, software, services and IT infrastructure to solve customer problems. More information about HP is available at [here](https://www8.hp.com/uk/en/hp-information.html). Media contact: Annalie Grubbs, annalie.dru.grubbs@hp.com. + +**HiSilicon** + +“As a recent core member of Linaro, we’re very happy to be a founding member of the Linaro Enterprise Group (LEG)” said Teresa He, President of HiSilicon. 
“The number of leading companies involved in LEG is a clear indication of the requirement in this space and we look forward to working with them to accelerate the development of the Arm server software ecosystem.” + +*About HiSilicon:* HiSilicon Technologies Co., Ltd. was established in October 2004. Headquartered in Shenzhen, China, Hisilicon has set up design divisions in Beijing, Shanghai, Silicon Valley (USA) and Sweden. The company is a leading chipset solution provider for telecom network, wireless terminal and digital media with the advantage of providing end-to-end chipsets and solutions from telecom network to consumer electronics. It has been serving more than 200 global operators in over 100 countries and will continue to bring maximum value to global operators and consumers. For more information, please visit [www.hisilicon.com](http://www.hisilicon.com/). + +**Marvell** + +“As demand for cloud services continues its rapid growth curve, the limitations of traditional server technologies come into plain sight,” said Steve Dansey, associate vice president of Marketing for the Cloud Services and Infrastructure (CSI) Business Unit of Marvell Semiconductor, Inc. “With a fundamentally efficient architecture that has already proven itself via the dominant adoption in the mobile space, Arm is a natural fit to address the challenges of today’s scale-out and big data cloud service and application centers. In keeping with Marvell’s mission to supply the leading platforms for all aspects of the connected lifestyle, we fully support LEG’s endeavor to accelerate Arm adoption in cloud servers by enabling software compatibility across different custom Arm SoCs that balance Storage, Networking and Compute resources.” + +*About Marvell:* Marvell is a world leader in the development of storage, communications and consumer silicon solutions. Marvell’s diverse product portfolio includes switching, transceiver, communications controller, wireless and storage solutions that power the entire communications infrastructure, including enterprise, metro, home and storage networking. As used in this release, the term “Marvell” refers to Marvell Technology Group Ltd. and its subsidiaries. For more information, visit [www.Marvell.com](https://www.marvell.com/). Media contact: Kim Anderson, +1-408-222-0950, kimander@marvell.com. + +**Red Hat** + +*About Red Hat:* Red Hat is the world’s leading provider of open source software solutions, using a community-powered approach to reliable and high-performing cloud, Linux, middleware, storage and virtualization technologies. Red Hat also offers award-winning support, training, and consulting services. As the connective hub in a global network of enterprises, partners, and open source communities, Red Hat helps create relevant, innovative technologies that liberate resources for growth and prepare customers for the future of IT. Learn more at [www.redhat.com](http://www.redhat.com/). Media contact: Stephanie Wonderlick, swonderl@redhat.com. + +**Samsung** + +*About Samsung:* Samsung Electronics Co., Ltd. is a global leader in semiconductor, telecommunication, digital media and digital convergence technologies with 2009 consolidated sales of 16.8 billion. Employing approximately 188,000 people in 185 offices across 65 countries, the company consists of eight independently operated business units: Visual Display, Mobile Communications, Telecommunication Systems, Digital Appliances, IT Solutions, Digital Imaging, Semiconductor and LCD. 
Recognized as one of the fastest growing global brands, Samsung Electronics is a leading producer of digital TVs, memory chips, mobile phones and TFT-LCDs. To learn more visit: [www.samsung.com](http://www.samsung.com/). + +**ST-Ericsson** + +“As a founding member of Linaro, ST-Ericsson is pleased to see Linaro extending its successful model into other computing segments. The Linux on Arm ecosystem is demonstrating its strength by adding new members and growing beyond mobile.” said Bjorn Ekelund, Head of Ecosystem, Research and Innovation at ST-Ericsson. + +*About ST-Ericsson:* ST-Ericsson is a world leader in developing and delivering a complete portfolio of innovative mobile platforms and cutting-edge wireless semiconductor solutions across the broad spectrum of mobile technologies. The company is a leading supplier to the top handset manufacturers. To learn more visit: [www.stericsson.com](https://www.ericsson.com/en). Media contact: media.relations@stericsson.com. diff --git a/src/content/blogs/industry-leaders-form-autoware-foundation-to-accelerate-collaboration-in-autonomous-driving.mdx b/src/content/blogs/industry-leaders-form-autoware-foundation-to-accelerate-collaboration-in-autonomous-driving.mdx new file mode 100644 index 0000000..3ca217f --- /dev/null +++ b/src/content/blogs/industry-leaders-form-autoware-foundation-to-accelerate-collaboration-in-autonomous-driving.mdx @@ -0,0 +1,61 @@ +--- +title: Autoware Foundation to Focus on Autonomous Driving +description: Linaro will collaborate with Tier IV, Inc., and US-based company + Apex.AI to form the Autoware Foundation to focus on autonomous driving. Read + more here. +date: 2018-12-10T09:00:00.000Z +image: linaro-website/images/blog/road-timelapse +tags: + - ai-ml + - security + - datacenter +author: linaro +related: [] + +--- + +Linaro Ltd, the open source collaborative engineering organization, Japan-based intelligent vehicle technology company Tier IV, Inc., and US-based autonomous mobility systems software company Apex.AI announced today the formation of the Autoware Foundation. + +The Autoware Foundation is a non-profit organisation created to initiate, grow, and fund open source collaborative engineering Autoware projects, starting with Autoware.AI, Autoware.Auto, and Autoware.IO. Autoware.AI is the original Autoware project started in 2015 by Shinpei Kato at Nagoya University that is being used globally by more than 100 companies in more than 30 vehicles today. Autoware.Auto is a rewrite of Autoware using ROS 2.0 for certifiable software stacks used in vehicles. Autoware.IO focuses on heterogeneous platform support based on 96Boards products, vehicle control interfaces as well as a collection of third-party software and hardware tools to help deliver the core values of Autoware. Examples of Autoware.IO projects include simulators, device drivers for sensors, by-wire controllers for vehicles, and hardware-independent programs for SoC boards. + +Shinpei Kato from Tier IV and the University of Tokyo, Jan Becker from Apex.AI and Stanford University and Yang Zhang from Linaro 96Boards and the Chinese Academy of Sciences AI Institute have together formed the founding Board of Directors for the Autoware Foundation. The Board is responsible for the operation of the Autoware Foundation, while a Technical Steering Committee is being formed from representatives of the Premium Members to drive the technical direction of the projects. 
Founding Premium Members include Apex.AI, Arm, AutoCore, AutonomouStuff, Huawei, Kalray, Linaro 96Boards, LG, Parkopedia, StreetDrone, Tier IV, TRI-AD (Toyota Research Institute Advanced Development, Inc), and Velodyne. The Premium Members are supported by founding Industrial and Academic & Non-Profit Members including eSOL, Intel, Nagoya University, OSRF (Open Source Robotics Foundation), RoboSense, Semi Japan, SiFive, and Xilinx. + +### Member and Board quotes + +> “As we work towards the mass deployment of safe, fully autonomous vehicles, we need to ensure that automotive players have the ability to influence technical direction and implement platform support for their solutions,” said Mark Hambleton, vice president open source software, Arm. “This partnership will allow the Autoware ecosystem to further collaborate on certifiable software stacks for secure, safe, and efficient next-generation vehicles.” + +> “The AutonomouStuff team leads the industry in implementing, supporting and deploying Autoware applications in automated vehicles and fleets,” said Bobby Hambrick, AutonomouStuff founder and CEO. “As part of the Autoware Foundation, we look forward to enabling further rapid development of autonomous driving solutions that will utilize our combined solutions.” + +> “We are thrilled to be joining the Autoware Foundation. Huawei is a leading global ICT solutions provider. We advocate customer-centricity, dedication, and continuous innovation based on customer needs,” said Jerry Su, Chief Architect of Huawei Autonomous Driving. “collaboration with Autoware enables us to work with the community to develop the self-driving vehicle software that accelerates the industry growth and benefits our customers.” + +> “We are very excited to be participating in the Autoware Foundation creation” said Eric Baissus, CEO of Kalray. “The Autoware open software stack is one of the most advanced software options available today for autonomous and intelligent systems. We will work with the other members of the foundation to open, extend and industrialize this software solution and make it run with optimum efficiency on Kalray’s MPPA intelligent processor family.” + +> “We are excited to join the Autoware Foundation and to offer LG’s autonomous driving simulator with built-in support for Autoware to facilitate research, development, and testing of autonomous software. We look forward to collaborating with foundation members and the community at large as we drive towards an autonomous future together.” said Seonman Kim, Head of Advanced Platform Lab, VP from LG Electronics. + +> Parkopedia’s Head of Autonomous Driving Brian Holt said, “We are thrilled to be joining the Autoware Foundation at such an early stage following our involvement and contributions to the Autoware open source self-driving car project. Parkopedia is committed to building high quality automotive-grade maps to support Autonomous Valet Parking and our collaboration with Autoware enables us to work with the community to develop the self-driving car software on which we will demonstrate these maps.” + +> Dr. 
James Kuffner, TRI-AD CEO & TRI Executive Advisor, said “Through participation in the Autoware Foundation, TRI-AD would like to help build a large, engaged, and self-governed open source community around Autonomous Driving Technology by contributing and providing support for code that it will be using internally.” + +> eSOL CTO Masaki Gondo said, “We are excited to join this new venture to further accelerate the realization of autonomous driving technologies. eSOL already supports Autoware with its eMCOS, the scalable and safe RTOS, and welcomes the idea of safety-certifiable Autoware.” + +> “We’re excited to be part of the Autoware Foundation. We share a common vision of building new industries on open platforms. It’s exciting to see Autoware committing to using and contributing to ROS 2, and we look forward to further collaboration between our communities,” said Brian Gerkey, CEO, Open Robotics (OSRF). + +> “SiFive welcomes the Autoware initiative, as an open-source self-driving software stack is a natural fit to high-performance AI hardware based on the free and open RISC-V architecture.” said Krste Asanovic, Chief Architect at SiFive. + +> “We are very proud to be Autoware Foundation members. Xilinx’s flexible and adaptive processing platforms will contribute to define projects towards the development of semi-autonomous and autonomous platforms with Autoware Foundation members,” said Dan Isaacs, Director of Automotive Strategy & Market Development, Xilinx Inc. + +> “Apex.AI is thrilled to be part of the Autoware Foundation. An open source project of this scope needs to be independent from a single company and must embrace the community supporting it. Therefore we are following the example of the Linux Foundation and Open Robotics by putting all Autoware projects under the roof of this foundation” said Jan Becker, Director on the Board of the Autoware Foundation and Co-Founder and CEO of Apex.AI, Inc. + +> “Autoware has recognized momentum in the industry and we are excited to be able to build on its success to offer open source projects supporting the deployment of the world’s leading autonomous vehicle technology” said Yang Zhang, Board Director of the Autoware Foundation and Director of 96Boards. “Linaro 96Boards program will help define the standardized hardware platforms on which to maintain and grow the Autoware code base and extend support for it across a broader range of SoC solutions.” + +> “Thanks to everyone involved for making this a memorable launch for the Autoware Foundation. Autoware drives innovations for everyone who loves open-source software and autonomous driving technology. We can never make it alone. We are all here to open the way to the future together,” said Shinpei Kato, Board of Directors of the Autoware Foundation and Founder of Tier IV, Inc. “Tier IV's vision is to democratize an ecosystem for intelligent vehicles. To this end, Tier IV provides mobility service platforms and software toolchains that enable Autoware to be deployed in the emerging market.” + +The Autoware board invites companies in the automotive industry to collaborate on building and optimizing Autoware. To find out more, please visit [https://autoware.org/](https://autoware.org/). + +### About Linaro and 96Boards + +Linaro is leading collaboration on open source development in the Arm ecosystem. 
The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. 96Boards is Linaro’s initiative to build a single worldwide software and hardware community across low-cost development boards based on Arm technology. A large range of products compliant with the 96Boards specifications are already available worldwide and this range is supplemented with additional hardware functionality provided through standardized mezzanine boards. + +To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/introducing-devicetree-org.mdx b/src/content/blogs/introducing-devicetree-org.mdx new file mode 100644 index 0000000..11a3c9f --- /dev/null +++ b/src/content/blogs/introducing-devicetree-org.mdx @@ -0,0 +1,22 @@ +--- +title: Introducing devicetree.org +date: 2016-07-22T11:00:00.000Z +tags: [] +description: We are looking for a well established way for firmware to also make use and modify the Device Tree blobs before handing them over to Linux kernel. With this BoF session we would like to get started a gather ideas etc +image: linaro-website/images/blog/devicetree-logo-dark +author: linaro +related: [] + +--- + + + +Device Tree is well established in the Linux kernel. But since there could be other bootloader(s) and firmware components involved that needs to configure the hardware and thereby also needs to update the Device Tree blobs before passing it to Linux kernel. Therefore we are looking for a well established way for firmware to also make use and modify the Device Tree blobs before handing them over to Linux kernel. With this BoF session we would like to get started a gather ideas etc + + + +[BUD17-416: Benchmark and profiling in OP-TEE ](https://www.slideshare.net/linaroorg/bud17416-benchmark-and-profiling-in-optee) from [Linaro](http://www.slideshare.net/linaroorg) + +**Speakers:** Joakim Bech, Jens Wiklander +**Track:** Security +**Session ID:** BUD17-313 diff --git a/src/content/blogs/is-linaro-a-distribution.mdx b/src/content/blogs/is-linaro-a-distribution.mdx new file mode 100644 index 0000000..3a828a5 --- /dev/null +++ b/src/content/blogs/is-linaro-a-distribution.mdx @@ -0,0 +1,23 @@ +--- +title: Is Linaro a Distribution? +description: In this article, David Rusling answers if Linaro a distribution + within the industry. Read his insights on this here! +image: linaro-website/images/blog/30921188158_953bca1c9f_k +tags: + - arm + - linux-kernel + - toolchain +author: david-rusling +date: 2010-07-01T16:50:00.000Z +link: /blog/community-blog/is-linaro-a-distribution/ +related: [] + +--- + +As I talk to people about Linaro, I'm often asked if Linaro is a distribution. This is, in some ways, an easy mistake to make as we talk about 'releases' and 'test heads'. 
Not only that, pretty much all of the Linux® based organizations set up recently seem to be Linux distributions, vertical solutions for particular market segments. Linaro is trying to achieve three things. Firstly, Linaro wants to make sure that embedded Linux runs really well on Arm based systems, taking advantage of all of their hardware. Secondly, it wants to ensure that Arm based platforms are fully supported in the latest, up to date, Linux kernel tree at kernel.org. These first two aims are, in themselves, highly useful and you can see how that maps onto the various working groups that have been set up and their blueprints, bug fixes and so on. The third, and final aim, is that any and all Linux distributions make use of Linaro's outputs. In other words, Linaro is not a distribution; instead it is attempting to add value and support many distributions. Think of Linaro as horizontal, not vertical. + +In order for distributions to take Linaro's outputs, we need some collaboration and alignment and this is where Kiko and I have been spending a lot of our time. Both before Linaro was launched and now as it is becoming the engineering organization that we dreamed of. Based on many industry discussions we are creating deliverables such as GCC 4.4.4 and 4.5 that will be useful to distributions this autumn and next spring. We're aiming to be ahead of the distributions, creating stable components that can be pulled into a distribution early in its development cycle. + +Distributions can choose to take the outputs of Linaro in many ways. They could take the results of Linaro's engineering from the stable releases of the upstream open source projects. It may, however, take a long while for support to find its way into a stable upstream release. As an example, any improvements to GCC will not be released by the FSF until spring 2011 as part of the GCC 4.6 release. Many distributions will wait for 4.6 to stabilize before taking it. In other words, they'll stick with 4.5 until around 4.5.3 before taking 4.6.1 or 4.6.2. This is one of the reasons that Linaro is creating local stable releases of software (the other is so that we can test the software and tools). Distributions may take these code bases as built binaries or as source trees; whatever mechanism suits them. + +So, how do we test the Linaro components? This is where the notion of 'test heads' comes in. Our initial test head is a small test image known as AEL (Arm Embedded Linux). This style of test platform is commonly used within silicon providers and is usually ported and run before mainstream distributions are ported to a platform. Think of it as the core components needed by a distribution. Within Linaro we will use AEL to make sure that the toolchain, kernel and middleware are stable and functional but may also choose to test against larger distributions to continuously improve the quality of our engineering output. The level of testing will be sufficient for device manufacturers and distributions to take Linaro components and base their developments on it. 
diff --git a/src/content/blogs/keynote-speakers-lined-up-for-linaro-connect-sfo15.mdx b/src/content/blogs/keynote-speakers-lined-up-for-linaro-connect-sfo15.mdx new file mode 100644 index 0000000..61b2279 --- /dev/null +++ b/src/content/blogs/keynote-speakers-lined-up-for-linaro-connect-sfo15.mdx @@ -0,0 +1,42 @@ +--- +excerpt: Linaro announced the complete line up of keynote speakers for the + upcoming Linaro Connect San Francisco (SFO15) that will take place September + 21st - 25th in Burlingame, California. The San Francisco conference will + feature several keynote speakers. +title: Keynote Speakers Lined Up for Linaro Connect SFO15 +description: Linaro announced the complete line up of keynote speakers for the + upcoming Linaro Connect San Francisco (SFO15) that will take place September + 21st - 25th in Burlingame, California. The San Francisco conference will + feature several keynote speakers. +image: linaro-website/images/blog/48784720458_63040ac998_k +author: linaro +date: 2015-09-10T20:13:28.000Z +link: /news/keynote-speakers-lined-up-for-linaro-connect-sfo15/ +tags: [] +related: [] + +--- + +Cambridge, UK; September 10, 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced the complete line up of keynote speakers for the upcoming [Linaro Connect San Francisco (SFO15) ](https://resources.linaro.org/en/tags/9aa85e69-2e5c-4d78-a77e-a86d049d56cb)that will take place September 21st - 25th in Burlingame, California. The San Francisco conference will feature several[ keynote speakers](https://resources.linaro.org/en/tags/9aa85e69-2e5c-4d78-a77e-a86d049d56cb). These specially invited industry leaders set the agenda for each day, sharing insights as well as presenting their vision of what is taking place in the Arm ecosystem. Linaro’s Chief Executive Officer George Grey will welcome attendees on Monday to SFO15. He will speak on the future of open source software across a full range of segments and how Linaro is having an impact. + +**Keynote speakers scheduled for the Linaro Connect SFO15 include:** + +* George Grey - Chief Executive Officer, Linaro +* Simon Segars – Chief Executive Officer, Arm +* Tiger Hu –  Architect, Alibaba Infrastructure Service Group +* Suresh Gopalakrishnan – Corporate VP and General Manager, Server Business, AMD +* Neil Trevett – Vice President Mobile Ecosystem, Nvidia and President of Khronos Group +* Dave Neary – NFV/SDN Community Strategy, Red Hat +* John Simmons - Media Platform Architect, Microsoft +* Karen Sandler - Executive Director, Software Freedom Conservancy + +The five-day event, which has sold out the past two shows, is celebrating its fifth year and has become the event to attend if you are interested in Linux development and related Arm-based ecosystems.  The agenda includes seventeen unique break-out track topics for attendees. Tracks will cover topics such as Android, Graphics and Multimedia, Kernel Consolidation, Platform Development, Power Management, QA and Infrastructure, Security, Tools, Validation and LAVA, and training + +Along with the regular track sessions, there will be additional opportunities for attendees to learn about Linaro’s work. Each day has been designated a focus area that will be highlighted that day. On Monday will be Community and Tuesday will be Linaro Home Group and Linaro Mobile Group day. 
The Linaro Enterprise Group will host an Arm Server Ecosystem day on Wednesday that will connect Linux/Linaro developers with other project developers and will be an opportunity to see the latest 64-bit Arm hardware and software development in this space.  The Linaro Networking Group is also planning a special demo session at the end of its focused sessions on Thursday. Finally on Friday Linaro will host its traditional Demo Friday. The demos will also feature in some of the keynotes and at other times during the week, but participants will get to see most of them all together at Demo Friday. This is a great opportunity for attendees to see all the work that Linaro and its members have been focused on. + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company is a collaborative engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [](/) and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/kprobes-event-tracing-armv8.mdx b/src/content/blogs/kprobes-event-tracing-armv8.mdx new file mode 100644 index 0000000..1238818 --- /dev/null +++ b/src/content/blogs/kprobes-event-tracing-armv8.mdx @@ -0,0 +1,318 @@ +--- +title: Kprobes Event Tracing on Armv8 +description: In this article, David Long takes a detailed look at Kprobes Event + Tracing on Armv8. Read about his findings here! +image: linaro-website/images/blog/40965990761_090a30658a_k +tags: + - arm + - linux-kernel +author: david-long +date: 2016-12-16T17:37:39.000Z +link: /blog/kprobes-event-tracing-armv8/ +related: [] + +--- + +![lightbox\_disabled=True Core Dump Banner](/linaro-website/images/blog/core-dump) + +## Introduction + +Kprobes is a kernel feature that allows instrumenting the kernel by setting arbitrary breakpoints that call out to developer-supplied routines before and after the breakpointed instruction is executed (or simulated). See the kprobes documentation[\[1\]](https://github.com/torvalds/linux/blob/master/Documentation/features/debug/kprobes/arch-support.txt) for more information. Basic kprobes functionality is selected with CONFIG\_KPROBES. Kprobes support was added to mainline for arm64 in the v4.8 release. + +In this article we describe the use of kprobes on arm64 using the debugfs event tracing interfaces from the command line to collect dynamic trace events. This feature has been available for some time on several architectures (including arm32), and is now available on arm64. The feature allows use of kprobes without having to write any code. + +## Types of Probes + +The kprobes subsystem provides three different types of dynamic probes described below. 
+ +### Kprobes + +The basic probe is a software breakpoint kprobes inserts in place of the instruction you are probing, saving the original instruction for eventual single-stepping (or simulation) when the probe point is hit. + +### Kretprobes + +Kretprobes is a part of kprobes that allows intercepting a returning function instead of having to set a probe (or possibly several probes) at the return points. This feature is selected whenever kprobes is selected, for supported architectures (including Armv8). + +### Jprobes + +Jprobes allows intercepting a call into a function by supplying an intermediary function with the same calling signature, which will be called first. Jprobes is a programming interface only and cannot be used through the debugfs event tracing subsystem. As such we will not be discussing jprobes further here. Consult the kprobes documentation if you wish to use jprobes. + +## Invoking Kprobes + +Kprobes provides a set of APIs which can be called from kernel code to set up probe points and register functions to be called when probe points are hit. Kprobes is also accessible without adding code to the kernel, by writing to specific event tracing debugfs files to set the probe address and information to be recorded in the trace log when the probe is hit. The latter is the focus of what this document will be talking about. Lastly kprobes can be accessed through the perf command. + +### Kprobes API + +The kernel developer can write functions in the kernel (often done in a dedicated debug module) to set probe points and take whatever action is desired right before and right after the probed instruction is executed. This is well documented in kprobes.txt. + +### Event Tracing + +The event tracing subsystem has its own documentation[\[2\]](https://github.com/torvalds/linux/blob/master/Documentation/trace/events.rst) which might be worth a read to understand the background of event tracing in general. The event tracing subsystem serves as a foundation for both tracepoints and kprobes event tracing. The event tracing documentation focuses on tracepoints, so bear that in mind when consulting that documentation. Kprobes differs from tracepoints in that there is no predefined list of tracepoints but instead arbitrary dynamically created probe points that trigger the collection of trace event information. The event tracing subsystem is controlled and monitored through a set of debugfs files. Event tracing (CONFIG\_EVENT\_TRACING) will be selected automatically when needed by something like the kprobe event tracing subsystem. + +#### Kprobes Events + +With the kprobes event tracing subsystem the user can specify information to be reported at arbitrary breakpoints in the kernel, determined simply by specifying the address of any existing probeable instruction along with formatting information. When that breakpoint is encountered during execution kprobes passes the requested information to the common parts of the event tracing subsystem which formats and appends the data to the trace log, much like how tracepoints works. Kprobes uses a similar but mostly separate collection of debugfs files to control and display trace event information. This feature is selected with CONFIG\_KPROBE\_EVENT. The kprobetrace documentation[\[3\]](https://github.com/torvalds/linux/blob/master/Documentation/trace/kprobetrace.rst) provides the essential information on how to use kprobes event tracing and should be consulted to understand details about the examples presented below. 
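+
+For contrast with the event tracing interface that the rest of this article focuses on, here is a minimal sketch of the in-kernel API described in the "Kprobes API" section, along the lines of the kernel's samples/kprobes/kprobe\_example.c. The probed symbol and the printed registers are illustrative only and are not taken from the examples below:
+
+```c
+#include <linux/module.h>
+#include <linux/kprobes.h>
+
+/* Probe the entry of a function by symbol name (illustrative choice). */
+static struct kprobe kp = {
+	.symbol_name = "_do_fork",
+};
+
+/* Runs just before the probed instruction is executed (or simulated). */
+static int handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+	pr_info("%s hit, pc=0x%lx, x0=0x%llx\n",
+		p->symbol_name, instruction_pointer(regs), regs->regs[0]);
+	return 0;
+}
+
+static int __init kprobe_example_init(void)
+{
+	kp.pre_handler = handler_pre;
+	return register_kprobe(&kp);
+}
+
+static void __exit kprobe_example_exit(void)
+{
+	unregister_kprobe(&kp);
+}
+
+module_init(kprobe_example_init);
+module_exit(kprobe_example_exit);
+MODULE_LICENSE("GPL");
+```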
+ +### Kprobes and Perf + +The perf tools provide another command line interface to kprobes. In particular "perf probe" allows probe points to be specified by source file and line number, in addition to function name plus offset, and address. The perf interface is really a wrapper for using the debugfs interface for kprobes. + +## Arm64 Kprobes + +All of the above aspects of kprobes are now implemented for arm64; in practice there are some differences from other architectures though: + +* Register name arguments are, of course, architecture specific and can be found in the Arm ARM (Arm Architecture Reference Manual). + +* Not all instruction types can currently be probed. Currently unprobeable instructions include mrs/msr (except DAIF read), exception generation instructions, eret, and hint (except for the nop variant). In these cases it is simplest to just probe a nearby instruction instead. These instructions are blacklisted from probing because the changes they cause to processor state are unsafe to do during kprobe single-stepping or instruction simulation, because the single-stepping context kprobes constructs is inconsistent with what the instruction needs, or because the instruction can’t tolerate the additional processing time and exception handling in kprobes (ldx/stx). + +* An attempt is made to identify instructions within a ldx/stx sequence and prevent probing; however, it is theoretically possible for this check to fail, resulting in allowing a probed atomic sequence which can never succeed. Be careful when probing around atomic code sequences. + +* Note that because of the details of the Linux Arm64 calling conventions it is not possible to reliably duplicate the stack frame for the probed function, and for that reason no attempt is made to do so with jprobes, unlike the majority of other architectures supporting jprobes. The reason for this is that there is insufficient information for the callee to know for certain the amount of the stack that is needed. + +* Note that the stack pointer information recorded from a probe will reflect the particular stack pointer in use at the time the probe was hit, be it the kernel stack pointer or the interrupt stack pointer. + +* There is a list of kernel functions which cannot be probed, usually because they are called as part of kprobes processing. Part of this list is architecture-specific and also includes things like exception entry code. + +## Using Kprobes Event Tracing + +One common use case for kprobes is instrumenting function entry and/or exit. It is particularly easy to install probes for this since one can just use the function name for the probe address. Kprobes event tracing will look up the symbol name and determine the address. The Armv8 calling standard defines where the function arguments and return values can be found, and these can be printed out as part of the kprobe event processing. + +### Example: Function entry probing + +Instrumenting a USB ethernet driver reset function: + +``` +$ pwd +/sys/kernel/debug/tracing +$ cat > kprobe_events <<EOF +p ax88772_reset %x0 +EOF +$ echo 1 > events/kprobes/enable +``` + +At this point a trace event will be recorded every time the driver’s *ax88772\_reset()* function is called. The event will display the pointer to the *usbnet* structure passed in via X0 (as per the Armv8 calling standard) as this function’s only argument.
After plugging in a USB dongle requiring this ethernet driver we see the following trace information: + +``` +$ cat trace +# tracer: nop +# +# entries-in-buffer/entries-written: 1/1   #P:8 +# +#                           _-----=> irqs-off +#                          / _----=> need-resched +#                         | / _---=> hardirq/softirq +#                         || / _--=> preempt-depth +#                         ||| / delay +#        TASK-PID   CPU#  |||| TIMESTAMP  FUNCTION +#           | |    |   ||||    |      | +kworker/0:0-4             [000] d... 10972.102939:   p_ax88772_reset_0: +(ax88772_reset+0x0/0x230)   arg1=0xffff800064824c80 +``` + +Here we can see the value of the pointer argument passed in to our probed function. Since we did not use the optional labelling features of kprobes event tracing the information we requested is automatically labeled *arg1*. Note that this refers to the first value in the list of values we requested that kprobes log for this probe, not the actual position of the argument to the function. In this case it also just happens to be the first argument to the function we’ve probed. + +### Example: Function entry and return probing + +The kretprobe feature is used specifically to probe a function return. At function entry the kprobes subsystem will be called and will set up a hook to be called at function return, where it will record the requested event information. For the most common case the return information, typically in the X0 register, is quite useful. The return value in %x0 can also be referred to as *$retval*. The following example also demonstrates how to provide a human-readable label to be displayed with the information of interest. + +Example of instrumenting the kernel *\_do\_fork()* function to record arguments and results using a kprobe and a kretprobe: + +``` +$ cd /sys/kernel/debug/tracing +$ cat > kprobe_events <<"EOF" +p _do_fork %x0 %x1 %x2 %x3 %x4 %x5 +r _do_fork pid=$retval +EOF +$ echo 1 > events/kprobes/enable +``` + +At this point every call to *\_do\_fork()* will produce two kprobe events recorded into the "*trace*" file, one reporting the calling argument values and one reporting the return value. The return value shall be labeled "*pid*" in the trace file. Here are the contents of the trace file after three fork syscalls have been made: + +``` +$ cat trace +# tracer: nop +# +# entries-in-buffer/entries-written: 6/6   #P:8 +# +#                              _-----=> irqs-off +#                             / _----=> need-resched +#                            | / _---=> hardirq/softirq +#                            || / _--=> preempt-depth +#                            ||| /     delay +#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION +#              | |       |   ||||       |         | +              bash-1671  [001] d...   204.946007: p__do_fork_0: (_do_fork+0x0/0x3e4) arg1=0x1200011 arg2=0x0 arg3=0x0 arg4=0x0 arg5=0xffff78b690d0 arg6=0x0 +              bash-1671  [001] d..1   204.946391: r__do_fork_0: (SyS_clone+0x18/0x20 <- _do_fork) pid=0x724 +              bash-1671  [001] d...   208.845749: p__do_fork_0: (_do_fork+0x0/0x3e4) arg1=0x1200011 arg2=0x0 arg3=0x0 arg4=0x0 arg5=0xffff78b690d0 arg6=0x0 +              bash-1671  [001] d..1   208.846127: r__do_fork_0: (SyS_clone+0x18/0x20 <- _do_fork) pid=0x725 +              bash-1671  [001] d...   
214.401604: p__do_fork_0: (_do_fork+0x0/0x3e4) arg1=0x1200011 arg2=0x0 arg3=0x0 arg4=0x0 arg5=0xffff78b690d0 arg6=0x0 +              bash-1671  [001] d..1   214.401975: r__do_fork_0: (SyS_clone+0x18/0x20 <- _do_fork) pid=0x726 +``` + +### Example: Dereferencing pointer arguments + +For pointer values the kprobe event processing subsystem also allows dereferencing and printing of desired memory contents, for various base data types. It is necessary to manually calculate the offset into structures in order to display a desired field. + +Instrumenting the *do\_wait()* function: + +``` +$ cat > kprobe_events <<"EOF" +p:wait_p do_wait wo_type=+0(%x0):u32 wo_flags=+4(%x0):u32 +r:wait_r do_wait $retval +EOF +$ echo 1 > events/kprobes/enable +``` + +Note that the argument labels used in the first probe are optional and can be used to more clearly identify the information recorded in the trace log. The signed offset and parentheses indicate that the register argument is a pointer to memory contents to be recorded in the trace log. The "*:u32*" indicates that the memory location contains an unsigned four-byte wide datum (an enum and an int in a locally defined structure in this case). + +The probe labels (after the colon) are optional and will be used to identify the probe in the log. The label must be unique for each probe. If unspecified, a useful label will be automatically generated from a nearby symbol name, as has been shown in earlier examples. + +Also note the "*$retval*" argument could just be specified as "*%x0*". + +Here are the contents of the "*trace*" file after two fork syscalls have been made: + +``` +$ cat trace +# tracer: nop +# +# entries-in-buffer/entries-written: 4/4   #P:8 +# +#                              _-----=> irqs-off +#                             / _----=> need-resched +#                            | / _---=> hardirq/softirq +#                            || / _--=> preempt-depth +#                            ||| /     delay +#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION +#              | |       |   ||||       |         | +             bash-1702  [001] d...   175.342074: wait_p: (do_wait+0x0/0x260) wo_type=0x3 wo_flags=0xe +             bash-1702  [002] d..1   175.347236: wait_r: (SyS_wait4+0x74/0xe4 <- do_wait) arg1=0x757 +             bash-1702  [002] d...   175.347337: wait_p: (do_wait+0x0/0x260) wo_type=0x3 wo_flags=0xf +             bash-1702  [002] d..1   175.347349: wait_r: (SyS_wait4+0x74/0xe4 <- do_wait) arg1=0xfffffffffffffff6 +``` + +### Example: Probing arbitrary instruction addresses + +In previous examples we have inserted probes for function entry and exit; however, it is possible to probe an arbitrary instruction (with a few exceptions). If we are placing a probe inside a C function the first step is to look at the assembler version of the code to identify where we want to place the probe. One way to do this is to use gdb on the vmlinux file and display the instructions in the function where you wish to place the probe. An example of doing this for the *module\_alloc* function in arch/arm64/kernel/module.c follows.
In this case, because gdb seems to prefer using the weak symbol definition and its associated stub code for this function, we get the symbol value from System.map instead: + +```bash +$ grep module_alloc System.map +ffff2000080951c4 T module_alloc +ffff200008297770 T kasan_module_alloc +``` + +In this example we’re using cross-development tools and we invoke gdb on our host system to examine the instructions comprising our function of interest: + +```c +$ ${CROSS_COMPILE}gdb vmlinux +(gdb) x/30i 0xffff2000080951c4 +        0xffff2000080951c4 <module_alloc>:    sub    sp, sp, #0x30 +        0xffff2000080951c8 <module_alloc+4>:    adrp    x3, 0xffff200008d70000 +        0xffff2000080951cc <module_alloc+8>:    add    x3, x3, #0x0 +        0xffff2000080951d0 <module_alloc+12>:    mov    x5, #0x713             // #1811 +        0xffff2000080951d4 <module_alloc+16>:    mov    w4, #0xc0              // #192 +        0xffff2000080951d8 <module_alloc+20>:    mov    x2, #0xfffffffff8000000    // #-134217728 +        0xffff2000080951dc <module_alloc+24>:    stp    x29, x30, [sp,#16] +        0xffff2000080951e0 <module_alloc+28>:    add    x29, sp, #0x10 +        0xffff2000080951e4 <module_alloc+32>:    movk    x5, #0xc8, lsl #48 +        0xffff2000080951e8 <module_alloc+36>:    movk    w4, #0x240, lsl #16 +        0xffff2000080951ec <module_alloc+40>:    str    x30, [sp] +        0xffff2000080951f0 <module_alloc+44>:    mov    w7, #0xffffffff        // #-1 +        0xffff2000080951f4 <module_alloc+48>:    mov    x6, #0x0               // #0 +        0xffff2000080951f8 <module_alloc+52>:    add    x2, x3, x2 +        0xffff2000080951fc <module_alloc+56>:    mov    x1, #0x8000            // #32768 +        0xffff200008095200 <module_alloc+60>:    stp    x19, x20, [sp,#32] +        0xffff200008095204 <module_alloc+64>:    mov    x20, x0 +        0xffff200008095208 <module_alloc+68>:    bl    0xffff2000082737a8 <__vmalloc_node_range> +        0xffff20000809520c <module_alloc+72>:    mov    x19, x0 +        0xffff200008095210 <module_alloc+76>:    cbz    x0, 0xffff200008095234 <module_alloc+112> +        0xffff200008095214 <module_alloc+80>:    mov    x1, x20 +        0xffff200008095218 <module_alloc+84>:    bl    0xffff200008297770 <kasan_module_alloc> +        0xffff20000809521c <module_alloc+88>:    tbnz    w0, #31, 0xffff20000809524c <module_alloc+136> +        0xffff200008095220 <module_alloc+92>:    mov    sp, x29 +        0xffff200008095224 <module_alloc+96>:    mov    x0, x19 +        0xffff200008095228 <module_alloc+100>:    ldp    x19, x20, [sp,#16] +        0xffff20000809522c <module_alloc+104>:    ldp    x29, x30, [sp],#32 +        0xffff200008095230 <module_alloc+108>:    ret +        0xffff200008095234 <module_alloc+112>:    mov    sp, x29 +        0xffff200008095238 <module_alloc+116>:    mov    x19, #0x0               // #0 +``` + +In this case we are going to display the result from the following source line in this function: + +```c +p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, +VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, +NUMA_NO_NODE, __builtin_return_address(0)); +``` + +...and also the return value from the function call in this line: + +``` +if (p && (kasan_module_alloc(p, size) < 0)) { +``` + +We can identify these in the assembler code from the calls to the external functions. To display these values we will place probes at 0xffff20000809520c and 0xffff20000809521c on our target system: + +``` +$ cat > kprobe_events <<EOF +p 0xffff20000809520c %x0 +p 0xffff20000809521c %x0 +EOF +$ echo 1 > events/kprobes/enable +``` + +Now after plugging an ethernet adapter dongle into the USB port we see the following written into the trace log: + +``` +$ cat trace +# tracer: nop +# +# entries-in-buffer/entries-written: 12/12   #P:8 +# +#                           _-----=> irqs-off +#                          / _----=> need-resched +#                         | / _---=> hardirq/softirq +#                         || / _--=> preempt-depth +#                         ||| / delay +#        TASK-PID   CPU#  |||| TIMESTAMP  FUNCTION +#           | |    |   ||||    |      | +      systemd-udevd-2082  [000] d... 
77.200991: p_0xffff20000809520c: (module_alloc+0x48/0x98) arg1=0xffff200001188000 +      systemd-udevd-2082  [000] d... 77.201059: p_0xffff20000809521c: (module_alloc+0x58/0x98) arg1=0x0 +      systemd-udevd-2082  [000] d... 77.201115: p_0xffff20000809520c: (module_alloc+0x48/0x98) arg1=0xffff200001198000 +      systemd-udevd-2082  [000] d... 77.201157: p_0xffff20000809521c: (module_alloc+0x58/0x98) arg1=0x0 +      systemd-udevd-2082  [000] d... 77.227456: p_0xffff20000809520c: (module_alloc+0x48/0x98) arg1=0xffff2000011a0000 +      systemd-udevd-2082  [000] d... 77.227522: p_0xffff20000809521c: (module_alloc+0x58/0x98) arg1=0x0 +      systemd-udevd-2082  [000] d... 77.227579: p_0xffff20000809520c: (module_alloc+0x48/0x98) arg1=0xffff2000011b0000 +      systemd-udevd-2082  [000] d... 77.227635: p_0xffff20000809521c: (module_alloc+0x58/0x98) arg1=0x0 +      modprobe-2097  [002] d... 78.030643: p_0xffff20000809520c: (module_alloc+0x48/0x98) arg1=0xffff2000011b8000 +      modprobe-2097  [002] d... 78.030761: p_0xffff20000809521c: (module_alloc+0x58/0x98) arg1=0x0 +      modprobe-2097  [002] d... 78.031132: p_0xffff20000809520c: (module_alloc+0x48/0x98) arg1=0xffff200001270000 +      modprobe-2097  [002] d... 78.031187: p_0xffff20000809521c: (module_alloc+0x58/0x98) arg1=0x0 +``` + +One more feature of the kprobes event system is recording of statistics information, which can be found in kprobe\_profile. After the above trace the contents of that file are: + +``` +$ cat kprobe_profile +p_0xffff20000809520c                                     6            0 +p_0xffff20000809521c                                     6            0 +``` + +This indicates that there have been a total of 6 hits on each of the two probes we set (12 events in all), which of course is consistent with the trace log data. More kprobe\_profile features are described in the kprobetrace documentation. + +There is also the ability to further filter kprobes events. The debugfs files used to control this are listed in the kprobetrace documentation while the details of their contents are (mostly) described in the trace events documentation. + +## Conclusion + +Linux on Armv8 is now at parity with other architectures in supporting the kprobes feature. Work is being done by others to also add uprobes and systemtap support. These features/tools and other already completed features (e.g. perf, coresight) allow the Linux Armv8 user to debug and test performance as they would on other, older architectures. + +*** + +Bibliography + +[\[1\]](https://github.com/torvalds/linux/blob/master/Documentation/features/debug/kprobes/arch-support.txt) Jim Keniston, Prasanna S. Panchamukhi, Masami Hiramatsu. “Kernel Probes (Kprobes).” *GitHub*. GitHub, Inc., 15 Aug. 2016. Web. 13 Dec. 2016. + +[\[2\]](https://github.com/torvalds/linux/blob/master/Documentation/trace/events.rst) Ts’o, Theodore, Li Zefan, and Tom Zanussi. “Event Tracing.” *GitHub*. GitHub, Inc., 3 Mar. 2016. Web. 13 Dec. 2016. + +[\[3\]](https://github.com/torvalds/linux/blob/master/Documentation/trace/kprobetrace.rst) Hiramatsu, Masami. “Kprobe-based Event Tracing.” *GitHub*. GitHub, Inc., 18 Aug. 2016. Web. 13 Dec. 2016.
diff --git a/src/content/blogs/kvm-pciemsi-passthrough-armarm64.mdx b/src/content/blogs/kvm-pciemsi-passthrough-armarm64.mdx new file mode 100644 index 0000000..cc97ab1 --- /dev/null +++ b/src/content/blogs/kvm-pciemsi-passthrough-armarm64.mdx @@ -0,0 +1,322 @@ +--- +author: eric-auger +date: 2016-02-29T23:08:58.000Z +description: > + In this article, Eric Auger takes a detailed look at the KVM PCIe/MSI + Passthrough on Arm/Arm64. Read about his findings here! +excerpt: While PCIe passthrough (the process of assigning a PCIe device to a VM, + also known as device assignment) is supported through a mostly + architecture-agnostic subsystem called VFIO, there are intricate details of an + Arm-based system that require special support for Message Signaled Interrupts + (MSIs) in the context of VFIO passthrough on Arm server systems. +link: /blog/core-dump/kvm-pciemsi-passthrough-armarm64/ +tags: + - linux-kernel + - qemu +title: KVM PCIe/MSI Passthrough on Arm/Arm64 +related: [] + +--- + +![lightbox\_disabled=True core-dump url=https://wiki-archive.linaro.org/CoreDevelopment](/linaro-website/images/blog/core-dump) + +While PCIe passthrough (the process of assigning a PCIe device to a VM, also known as device assignment) is supported through a mostly architecture-agnostic subsystem called VFIO, there are intricate details of an Arm-based system that require special support for Message Signaled Interrupts (MSIs) in the context of VFIO passthrough on Arm server systems. + +## Message Signaled Interrupts + +MSIs are an alternative to wire based interrupts. A device using MSIs does not need a dedicated line to the interrupt controller. Instead, to trigger interrupts a device simply writes at a specific memory address belonging to a piece of HW that can generate interrupts as a result of the memory write.  Such hardware is typically referred to as an MSI controller. The MSI controller derives an interrupt ID from the written message. + +Thus, the MSI-enabled device must be programmed with: + +* the address to write to +* and a payload + +# Arm MSI Controllers + +This chapter gives a brief introduction of 2 Arm MSI controllers, the GICv2m and the GICv3 ITS. We purposely present a simplified overview. + +## GICv2M + +The GICv2m widget contains one or more MSI frames. Each MSI frame is wired up to a set of GIC SPI wires (shared peripheral interrupt). MSI frames should not target the same SPI IDs for isolation purpose. + +![KVM blog image 1](/linaro-website/images/blog/KVM-blog-image-1) + +From the CPU perspective each MSI frame is 4kB wide and contains some info registers telling the base and number of associated SPI IDs and the MSI\_SETSPI\_NS 32-bit register. + +The MSI\_SETSPI\_NS is also referred to as the doorbell. Writes to this register trigger SPI to the GIC. The data payload allows to select the triggered SPI ID. + +The GICv2M does not require any functional programming since the SPIs are statically assigned to each MSI frame. + +Separate MSI frames are provisioned for interrupt isolation purpose. Each frame is supposed to target separate SPI ID windows. Devices attached to separate MSI frames have different SPI ID domains. Of course MSI frame access must be restricted by the bus topology, an IOMMU, or by other means. On the other hand, a system with a single MSI frame cannot do HW IRQ isolation between devices allowed to access that single MSI frame. 
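+
+In Linux, the address/payload pair described above is represented by the generic struct msi\_msg, which each MSI controller's irqchip driver fills in for a device. The following is a simplified, illustrative sketch of what a GICv2m-style driver does in its irq\_compose\_msi\_msg() callback; the v2m\_frame container and the function here are invented for the example and are not the actual code from drivers/irqchip/irq-gic-v2m.c:
+
+```c
+#include <linux/kernel.h>
+#include <linux/msi.h>
+
+/* Offset of the MSI_SETSPI_NS doorbell register within a GICv2m frame. */
+#define V2M_MSI_SETSPI_NS	0x040
+
+/* Invented container for the example: one MSI frame and its base address. */
+struct v2m_frame {
+	phys_addr_t base;
+};
+
+/*
+ * Compose the MSI message for one allocated SPI: the address is the
+ * doorbell register inside the frame, the payload selects the SPI that
+ * the write will trigger.
+ */
+static void v2m_compose_msi_msg(struct v2m_frame *frame,
+				unsigned int spi, struct msi_msg *msg)
+{
+	phys_addr_t doorbell = frame->base + V2M_MSI_SETSPI_NS;
+
+	msg->address_hi = upper_32_bits(doorbell);
+	msg->address_lo = lower_32_bits(doorbell);
+	msg->data = spi;
+}
+```
+
+The device is then programmed with exactly this address and data, which is why restricting who can reach a given frame is what provides interrupt isolation.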
+ +![KVM blog image 2](/linaro-website/images/blog/KVM-blog-image-2) + +## GICv3 ITS + +GICv3 supports a compatibility mode where a similar mechanism to the GICv2m is used. But more importantly it supports the Interrupt Translation Service (ITS) mechanism. The ITS exposes a single 64kB MSI frame. This MSI frame contains the GITS\_TRANSLATER register (the ITS doorbell register). This is the address to be written when a device wants to trigger an interrupt. The ITS implements a translation mechanism that takes as input the eventid passed in the MSI data payload and a deviceid (conveyed out-of-band, typically on the AXI user bits), and outputs an LPI ID. LPI stands for Locality-specific Peripheral Interrupt. The GIC HW takes this LPI ID as input. + +As opposed to the GICv2M, the ITS must be configured by software before it is used. For example, translation tables need to be programmed before any MSI translation can succeed: + +* A device table entry must exist per deviceid, pointing to a device interrupt translation table + +* An entry must exist in the device interrupt translation table for each eventid the device is likely to produce. This entry basically tells which LPI ID to trigger (and the CPU it targets) + +![KVM blog image 3](/linaro-website/images/blog/KVM-blog-image-3) + +Interrupt translation is also supported on Intel hardware as part of the VT-d spec. The Intel IRQ remapping HW provides a translation service similar to the ITS. The difference is that the Intel implementation looks more like a true IOMMU in the sense that the translation process uses the MSI address as input as well as the MSI data payload. On Arm the deviceid is conveyed out of band. + +So on x86 there is not a single doorbell address MSI messages are written to. Instead each device writes at a different address. This address is within the 0xFEEX\_XXXXh range, and bits 14:0 and bit 2 of the upper 32 bits of the address encode the deviceid (handle) used by the translation process. + +For that reason the IRQ remapping HW is abstracted by IOMMU drivers. On Arm, the ITS is abstracted by an irqchip driver. + +# KVM PCI/MSI passthrough, x86/Arm Differences + +This chapter explains why the current VFIO integration (QEMU VFIO PCI device / kernel VFIO PCI driver) does not work for Arm. + +When a device is assigned to a guest, it is unbound from its native driver and bound to the VFIO-PCI driver. A prerequisite for using VFIO in full feature mode is to have an IOMMU downstream of the device. Indeed, the VFIO driver API eventually allows the user-space to set up DMA mappings between the device IOVA space and user-space virtual memory. If no IOMMU mapping exists for a given IOVA, the related DMA access fails with an IOMMU abort. The IOMMU provides DMA access isolation. + +In the virtualization use case, the QEMU VFIO device takes care of mapping all the guest RAM region physical addresses to allow them to be accessed by the assigned device. However only the RAM regions are mapped, meaning the peripheral register spaces are not mapped. + +On x86 this does not bring any issue since MSI write transactions hit within a special 1MB physical address window \[FEE0\_0000h - FEF0\_0000h]. Those transactions target the APIC configuration space and not DRAM, meaning the downstream IOMMU is bypassed. So there is no need to IOMMU map the MSI transaction addresses. + +On Arm, however, the MSI transactions towards the doorbell are conveyed through the IOMMU. Therefore an IOMMU mapping must exist. This is similar on PowerPC.
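+
+To make the mapping requirement concrete, this is roughly how user space (QEMU's VFIO code, or any other VFIO user) creates an IOMMU mapping for a chunk of guest RAM today, using the standard type1 API. This is a simplified sketch with error handling omitted; `container` is assumed to be an already opened and configured /dev/vfio/vfio file descriptor:
+
+```c
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
+/* Map one region of user memory into the device's IOVA space. */
+static int map_guest_ram(int container, void *vaddr, uint64_t iova,
+			 uint64_t size)
+{
+	struct vfio_iommu_type1_dma_map map;
+
+	memset(&map, 0, sizeof(map));
+	map.argsz = sizeof(map);
+	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
+	map.vaddr = (uintptr_t)vaddr;	/* user virtual address backing the RAM */
+	map.iova  = iova;		/* address the device will emit */
+	map.size  = size;
+
+	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
+}
+```
+
+Nothing in this flow maps the physical address of the MSI doorbell, which is exactly the gap discussed next.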
+ +Without changes to the VFIO subsystem, MSIs simply cause IOMMU aborts because no mapping is defined between the address used by the device (IOVA) and the physical address of the MSI frame containing the doorbell register. + +The goal of the ongoing work is to create the needed IOMMU mappings for MSI write transactions to eventually reach the hardware MSI frame. + +# Assigned device MSI Setup + +This chapter describes how an MSI is set up for an assigned device. First we discuss the VFIO legacy implementation (the upstreamed implementation working for x86). Then we explain the adaptations needed to make it functional on Arm/Arm64. + +## Legacy Implementation + +VFIO decouples the MSI configuration of the physical PCIe device from the configuration performed by the guest driver. + +Practically, the MSI message (address/data) programmed by the guest is not used to program the actual physical PCIe device. PCIe configuration space accesses performed by the guest are trapped by VFIO/KVM. The MSI address is never used. The data payload computed by the guest matches a virtual SPI ID and not a physical SPI ID. + +Instead, when the user-space sets IRQ signalling up (VFIO\_DEVICE\_SET\_IRQS ioctl), the host VFIO PCI driver retrieves MSI vectors from the host MSI sub-system. Therefore, it programs the assigned PCIe devices with an MSI message composed by the host msi-parent MSI controller. + +The MSI forwarding then follows this path: + +host computed MSI message -> host computed physical SPI ID -> eventfd -> guest computed virtual SPI ID + +## Requested adaptation for Arm + +When the VFIO PCI driver programs the assigned physical PCIe device with an MSI message composed by the host, we need to replace the MSI message address (the doorbell host physical address) by an IOVA, mapped onto this doorbell physical address. + +So we need to identify an IOVA that is not already used (i.e. not an IOVA matching any guest RAM region GPA). The choice, then, is to either (1) use an IOVA known to the guest (in its GPA space), for example belonging to a virtual GICv2m or other MSI controller created and presented to the guest, or (2) just use some other IOVA, not corresponding to any RAM region. + +It feels natural that user-space communicates the address of a virtual MSI controller to the guest (a GICv2m single MSI frame in our case). However, we saw that the virtual and physical sides of MSIs are completely decoupled. Also a device may need multiple MSI frames, or the host MSI controller may be different in character from the virtual one given to the guest. + +So we currently choose not to use the GICv2m MSI frame GPA. Instead QEMU provides a pool of unused GPAs to VFIO. + +In mach-virt we have a platform bus which represents a pool of IRQs and MMIO pages. The platform bus is currently used for dynamic instantiation of sysbus devices, especially VFIO platform devices. This 32MB GPA pool with its own GPA allocator is quite well suited to provide an anonymous contiguous pool of GPA=IOVA usable to map MSI frames. This integration does not induce any change in the mach-virt memory map. + +We reuse the VFIO DMA MAP ioctl to pass this reserved IOVA region. A new flag (VFIO\_DMA\_FLAG\_MSI\_RESERVED\_IOVA) is introduced to differentiate such reserved IOVA from RAM IOVA. Then the base/size of the window is passed to the IOMMU driver through a new function introduced in the IOMMU API.
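+
+Purely as an illustration of the interface extension described above, a user-space registration of the reserved IOVA window could look like the sketch below. The flag name is taken from the text of the proposal and its value here is invented; the exact ioctl layout belongs to the RFC series under discussion and may differ from what is eventually merged:
+
+```c
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
+/* Value is illustrative only; the real one is defined by the RFC series. */
+#ifndef VFIO_DMA_FLAG_MSI_RESERVED_IOVA
+#define VFIO_DMA_FLAG_MSI_RESERVED_IOVA	(1 << 2)
+#endif
+
+/*
+ * Tell the type1 backend that [iova, iova + size) is not backed by user
+ * memory but is reserved for MSI doorbell mappings, reusing the existing
+ * VFIO_IOMMU_MAP_DMA ioctl as described in the text.
+ */
+static int register_msi_iova_window(int container, uint64_t iova,
+				    uint64_t size)
+{
+	struct vfio_iommu_type1_dma_map map;
+
+	memset(&map, 0, sizeof(map));
+	map.argsz = sizeof(map);
+	map.flags = VFIO_DMA_FLAG_MSI_RESERVED_IOVA;
+	map.iova  = iova;	/* e.g. carved out of the mach-virt platform bus pool */
+	map.size  = size;
+
+	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
+}
+```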
+
+The IOVA allocation within the supplied reserved IOVA window is performed on-demand, when the MSI controller composes/writes the MSI message in the PCIe device. The IOMMU mapping between the newly allocated IOVA and the doorbell address page is also created at that time. The MSI controller uses a new function introduced in the IOMMU API to allocate the IOVA and create an IOMMU mapping.
+
+So adaptations are needed at the VFIO, IOMMU and MSI controller levels. The extension of the IOMMU API is still under discussion, and the changes at MSI controller level still need to be consolidated.
+
+# Interrupt Safety
+
+When an MSI-enabled device is assigned to a guest, we need to guarantee that it cannot trigger MSIs that correspond to interrupt IDs of devices belonging to the host or other guests. Indeed, once a device gets access to an MSI frame shared with others, nothing prevents a malicious user-space driver from triggering DMA requests within that region. This can lead to denial-of-service attacks.
+
+In the figure below, imagine device #0 is used by the host while devices #1 and #2 are assigned to a guest.
+
+![KVM blog image 4](/linaro-website/images/blog/KVM-blog-image-4)
+
+## Interrupt Safety with GICv2m
+
+On GICv2m the standard way to implement interrupt isolation is to support several MSI frames and make sure guests are assigned separate MSI frames (the host and each guest must have separate MSI frames). This is due to the fact that the GICv2m does not support interrupt translation, also known as IRQ remapping (Intel naming).
+
+Also, even with multiple MSI frames, an SR-IOV PCI device attached to a PCI host controller has a single MSI parent frame. For security reasons, we could not have one VF (virtual function) assigned to one guest and another VF assigned to a different guest.
+
+Since the HW does not support IRQ remapping, the host kernel would need to check that devices attached to a VFIO group do not share an MSI frame with devices outside of the group. Performing such a check in software involves extending the VFIO notion of group viability. This would bring significant design complexity, so the choice was made to consider MSI passthrough without an IRQ-remapping-capable msi-parent as unsafe.
+
+If the end user takes the risk of enabling such passthrough, they must explicitly load the VFIO\_IOMMU\_TYPE1 module with the allow\_unsafe\_interrupts parameter set to 1 (see the *User Perspective* section). This is an obvious limitation, but it works the same way in the x86 world.
+
+## Interrupt Safety with GICv3 ITS
+
+With the GICv3 ITS we do not have this issue, since each MSI transaction is tagged with a device-id and the device-id makes it possible to separate the LPI domains. The ITS supports IRQ remapping similarly to the Intel VT-d IRQ remapping HW: MSI passthrough is safely supported and users do not need to use the allow\_unsafe\_interrupts parameter.
+
+# Conclusions
+
+Supporting MSI passthrough with KVM on Arm platforms requires changes to Linux and QEMU due to underlying differences between the Arm and x86 architectures. Arm platforms with GICv2m MSI controllers will require users to load VFIO with the allow\_unsafe\_interrupts parameter for MSI passthrough to work, but GICv3 ITS platforms will work with VFIO without any additional parameters.
+
+The changes required to Linux and QEMU are currently being upstreamed by Linaro and the latest versions of the patch series are referenced below \[5, 6].
+
+## User Perspective
+
+This chapter illustrates the assignment of two different PCIe devices:
+
+* Intel 82574L Ethernet Controller (**e1000e**)
+
+* Intel X540-T2 Ethernet Controller (SR-IOV capable)
+
+on an AMD 64-bit Arm Overdrive featuring a single GICv2m MSI frame.
+
+## e1000e Assignment
+
+#### Host Compilation
+
+*make defconfig*
+*scripts/config -e CONFIG\_IOMMU\_SUPPORT*
+*scripts/config -e CONFIG\_IOMMU\_API*
+*scripts/config -e CONFIG\_ARM\_SMMU*
+*scripts/config -m CONFIG\_VFIO*
+*scripts/config -m CONFIG\_VFIO\_PCI*
+*scripts/config -m CONFIG\_VFIO\_IOMMU\_TYPE1*
+*scripts/config -e CONFIG\_NETDEVICES*
+*scripts/config -e CONFIG\_NET\_VENDOR\_AMD*
+*scripts/config -e CONFIG\_AMD\_XGBE*
+*scripts/config -e CONFIG\_E1000E*
+
+#### Host PCIe Topology
+
+*00:00.0 0600: 1022:1a00*
+*Subsystem: 1022:1a00*
+*00:02.0 0600: 1022:1a01*
+*00:02.2 0604: 1022:1a02*
+*Kernel driver in use: pcieport*
+**01:00.0 0200: 8086:10d3**
+**Subsystem: 8086:a01f**
+**Kernel driver in use: e1000e**
+
+*00:00.0 Host bridge: Advanced Micro Devices, Inc. \[AMD] Device 1a00*
+*00:02.0 Host bridge: Advanced Micro Devices, Inc. \[AMD] Device 1a01*
+*00:02.2 PCI bridge: Advanced Micro Devices, Inc. \[AMD] Device 1a02*
+***01:00.0 Ethernet controller: Intel Corporation 82574L Gigabit Network Connection***
+
+#### Module Loading
+
+allow\_unsafe\_interrupts opt-in:
+
+*sudo modprobe -v vfio-pci*
+*sudo modprobe -r vfio\_iommu\_type1*
+*sudo modprobe -v vfio\_iommu\_type1 allow\_unsafe\_interrupts=1*
+
+#### VFIO-PCI driver binding
+
+The following command lines unbind the native e1000e driver and bind the vfio-pci driver instead:
+
+*echo vfio-pci > /sys/bus/pci/devices/0000:01:00.0/driver\_override*
+*echo 0000:01:00.0 > /sys/bus/pci/drivers/e1000e/unbind*
+*echo 0000:01:00.0 > /sys/bus/pci/drivers\_probe*
+
+#### QEMU command line example
+
+*qemu-system-aarch64 -M virt -smp 4 -m 12G -cpu host -serial stdio -display none --enable-kvm -kernel /root/VM/Image -drive if=none,cache=writethrough,file=/root/VM/ubuntu10.img,format=raw,id=guestrootfs -device virtio-blk-device,drive=guestrootfs -net none **-device vfio-pci,host=01:00.0** -append 'loglevel=8 root=/dev/vda rw console=ttyAMA0 earlyprintk ip=dhcp'*
+
+## X540-T2 (SR-IOV capable) Assignment
+
+#### Host Compilation
+
+**ACS Capability Override**
+
+The PCIe ACS capability (Access Control Services) is not properly exposed on this HW.
+
+Without any action, the PF (physical function) and all the VFs (virtual functions) belong to the same IOMMU group. This prevents assigning a VF since the VFIO group is not viable (the PF must remain bound to the native ixgbe driver, otherwise the VFs disappear).
+
+This problem is pretty well known on other architectures too. There is a patch available to work around the issue, but it will most probably never be upstreamed: [https://lkml.org/lkml/2013/5/30/513](https://lkml.org/lkml/2013/5/30/513). At least it makes it possible to experiment with SR-IOV passthrough.
+
+After applying the patch and adding *pcie\_acs\_override=downstream* to the grub command line, the PF and VFs are in separate IOMMU groups.
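+
+To double-check the resulting grouping, each function's iommu\_group symlink in sysfs can be resolved. The small helper below is only an illustrative sketch (it is not part of the patch series); the PCI address in its comment is the one used in this example setup:
+
+```c
+/*
+ * Illustrative helper: print the IOMMU group a PCI function belongs to by
+ * resolving its sysfs iommu_group symlink, e.g. ./iommu-group 0000:01:00.0
+ */
+#include <stdio.h>
+#include <limits.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+int main(int argc, char **argv)
+{
+	char path[PATH_MAX], target[PATH_MAX];
+	ssize_t len;
+
+	if (argc != 2) {
+		fprintf(stderr, "usage: %s <domain:bus:dev.fn>\n", argv[0]);
+		return 1;
+	}
+
+	snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/iommu_group", argv[1]);
+	len = readlink(path, target, sizeof(target) - 1);
+	if (len < 0) {
+		perror(path);
+		return 1;
+	}
+	target[len] = '\0';
+
+	/* The last component of the link target is the group number. */
+	printf("%s -> %s\n", argv[1], target);
+	return 0;
+}
+```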
+
+#### **ixgbe Module Addition**
+
+Compile the ixgbe driver as a module (needed to turn the VFs on) by adding the following options:
+
+*scripts/config -m CONFIG\_IXGB*
+*scripts/config -m CONFIG\_IXGBE*
+*scripts/config -m CONFIG\_IXGBEVF*
+
+#### Host PCIe Topology
+
+**Before SR-IOV enabling:**
+
+*00:00.0 0600: 1022:1a00*
+*Subsystem: 1022:1a00*
+*00:02.0 0600: 1022:1a01*
+*00:02.2 0604: 1022:1a02*
+*Kernel driver in use: pcieport*
+*01:00.0 0200: 8086:1528 (rev 01)*
+*Subsystem: 8086:0002*
+*Kernel driver in use: ixgbe*
+
+**SR-IOV enabling:**
+
+Reload the ixgbe module with the max\_vfs parameter set to the desired number of virtual functions:
+
+*modprobe -r ixgbe*
+*modprobe ixgbe max\_vfs=2*
+
+Now the PCIe topology looks like:
+
+*-\[0000:00]-+-00.0*
+   *+-02.0*
+   *-02.2-\[01]--+-00.0*
+      *+-10.0*
+      *-10.2*
+
+*00:00.0 0600: 1022:1a00*
+*Subsystem: 1022:1a00*
+*00:02.0 0600: 1022:1a01*
+*00:02.2 0604: 1022:1a02*
+*Kernel driver in use: pcieport*
+*01:00.0 0200: 8086:1528 (rev 01) eth4*
+*Subsystem: 8086:0002*
+*Kernel driver in use: ixgbe*
+**01:10.0 0200: 8086:1515 (rev 01)**
+**Subsystem: 8086:0002**
+**Kernel driver in use: ixgbevf**
+**01:10.2 0200: 8086:1515 (rev 01)**
+**Subsystem: 8086:0002**
+**Kernel driver in use: ixgbevf**
+
+#### Allow Unsafe Interrupts
+
+*sudo modprobe -v vfio-pci*
+*sudo modprobe -r vfio\_iommu\_type1*
+*sudo modprobe -v vfio\_iommu\_type1 allow\_unsafe\_interrupts=1*
+
+#### Physical Function Enable
+
+The PF must be enabled before assigning the VFs:
+
+*ifconfig eth4 up*
+
+#### VFIO-PCI driver binding
+
+Unbind the native ixgbevf driver and bind the vfio-pci driver instead:
+
+*echo vfio-pci > /sys/bus/pci/devices/0000:01:10.0/driver\_override*
+*echo 0000:01:10.0 > /sys/bus/pci/drivers/ixgbevf/unbind*
+*echo 0000:01:10.0 > /sys/bus/pci/drivers\_probe*
+
+#### QEMU Command line
+
+*qemu-system-aarch64 -M virt -smp 4 -m 4096 -cpu host -serial stdio -display none --enable-kvm -kernel /root/VM/Image1 -drive if=none,cache=writethrough,file=/root/VM/ubuntu10.img,format=raw,id=guestrootfs -device virtio-blk-device,drive=guestrootfs -net none **-device vfio-pci,host=01:10.0** -append 'loglevel=8 root=/dev/vda rw console=ttyAMA0 earlyprintk ip=dhcp'*
+
+# References
+
+## Documents
+
+\[1] Server Base System Architecture (SBSA): [http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0029/index.html](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0029/index.html)
+
+\[2] GICv3 Architecture Specification
+
+\[3] GICv3 Software Overview, [http://infocenter.arm.com/help/topic/com.arm.doc.dai0492a/GICv3\_Software\_Overview\_Official\_Release\_A.pdf](http://infocenter.arm.com/help/topic/com.arm.doc.dai0492a/GICv3_Software_Overview_Official_Release_A.pdf)
+
+\[4] Intel® Virtualization Technology for Directed I/O (Architecture Specification): [http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf](http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf)
+
+## Kernel & QEMU Series
+
+\[5] kernel series: KVM PCIe/MSI passthrough on Arm/Arm64 [https://lkml.org/lkml/2016/2/12/47](https://lkml.org/lkml/2016/2/12/47)
+
+\[6] QEMU series: \[RFC v2 0/8] KVM PCI/MSI passthrough with mach-virt
[http://lists.gnu.org/archive/html/qemu-arm/2016-01/msg00444.html](http://lists.gnu.org/archive/html/qemu-arm/2016-01/msg00444.html)
diff --git a/src/content/blogs/latest-linaro-gcc-toolchain-release-supports-full-range-arm-cortex-processors.mdx b/src/content/blogs/latest-linaro-gcc-toolchain-release-supports-full-range-arm-cortex-processors.mdx
new file mode 100644
index 0000000..b0cf355
--- /dev/null
+++ b/src/content/blogs/latest-linaro-gcc-toolchain-release-supports-full-range-arm-cortex-processors.mdx
@@ -0,0 +1,32 @@
+---
+title: New GCC Toolchain Releases for Arm Cortex-A Processors
+description: Linaro announces the release of the Linaro GCC Toolchain, which offers
+  full support for the Arm Cortex™-A processor family. Read more here.
+image: linaro-website/images/blog/Banner_Linux_Kernel
+author: linaro
+date: 2012-02-09T12:18:24.000Z
+link: /news/latest-linaro-gcc-toolchain-release-supports-full-range-arm-cortex-processors/
+tags: []
+related: []
+
+---
+
+SAN FRANCISCO - 9 FEB 2012
+
+Linaro™, the not-for-profit engineering organization consolidating and optimizing open source software for the Arm® architecture, announced today the 2012.02 release of Linaro GCC 4.6 and Linaro GCC 4.5, which offer full support for the Arm Cortex™-A processor family, including the Cortex-A5, Cortex-A7, Cortex-A8, Cortex-A9 and Cortex-A15. This new release helps Linaro continue to provide companies with a foundation on which they can rapidly build and deliver innovative, differentiated solutions.
+
+The Linaro Toolchain Working Group finalized this release during open Linaro Connect sessions on Wednesday February 8th. Over 200 engineers from more than 40 companies attended a week of sessions, planning future work and discussing challenges facing the Linux community developing solutions on the Arm architecture.
+
+Accompanying the release are complete notes, details of bug fixes and links to the source tarballs. Downloads are available from the Linaro GCC page on Launchpad.
+
+Linaro GCC 4.6 2012.02 is the twelfth release in the 4.6 series. Based on the latest GCC 4.6.2+svn183786, it contains bug fixes and backports Cortex-A7 and Cortex-A15 support from the FSF (Free Software Foundation) trunk. In addition to the updates to 4.6.2+svn183786 and initial Cortex-A7 support, the most interesting changes in this release are the backporting of Cortex-A15 tuning improvements from upstream and improvements to 64-bit unsigned comparisons.
+
+Linaro GCC 4.5 2012.02 is the eighteenth release in the 4.5 series. Based on the latest GCC 4.5.3+svn183785, it is a maintenance-only release.
+
+These toolchains are the latest downloads enabling Linaro members to develop advanced products and are part of its standard monthly release cycle. The Linaro Toolchain Working Group release occurs two weeks before Linaro's main release day, which is scheduled for 16:00 UTC on Thursday February 23rd.
+
+The releases include versions of Linaro's binary platform images, component sources, board support packages and infrastructure/validation tools to ensure that Linaro output can be consumed most easily by the Arm development community, product builders and enthusiasts.
+
+## Join us at Linaro Connect
+
+Linaro Connect is held every three to four months to bring the Linux on Arm community together to work on the latest system-on-chip (SoC) developments, plan new engineering efforts and hold engineering hacking sessions.
These events give the Linux community an opportunity to be a part of the Linaro team and help to define the Arm tools, Linux kernels and builds of key Linux distributions, including Android and Ubuntu, on member SoCs. Join us for the current event February 6-10th in San Francisco, California or the Q2 Linaro Connect May 28th - June 1st in Hong Kong.
diff --git a/src/content/blogs/lava-fundamentals.mdx b/src/content/blogs/lava-fundamentals.mdx
new file mode 100644
index 0000000..c0c7c69
--- /dev/null
+++ b/src/content/blogs/lava-fundamentals.mdx
@@ -0,0 +1,59 @@
+---
+title: LAVA Fundamentals
+description: In this article, Alan Bennett provides an overview of the LAVA
+  (Linaro Automation and Validation Architecture) fundamentals. Read more here!
+image: linaro-website/images/blog/30921180788_34ce2cd5f8_c
+tags:
+  - toolchain
+author: alan-bennett
+date: 2011-08-12T21:24:00.000Z
+link: /blog/lava-blog/lava-fundamentals/
+related: []
+
+---
+
+This blog post talks about the core topics that LAVA deals with, dives into the architecture of the stack as it exists today and provides some background rationale for the design choices we made. If you are not familiar with LAVA, you may want to read the introduction first.
+
+LAVA is a broad project and, to make it more manageable for day-to-day engineering, we have separated it into a collection of smaller projects, each focused on a narrow topic. Today LAVA has the following key sub-projects:
+
+* [LAVA Test](https://launchpad.net/lava-test) is a framework and a command line tool for wrapping existing test programs and translating results into a common format.
+
+* [LAVA Dispatcher](https://launchpad.net/lava-dispatcher) is a framework and a command line tool for controlling test execution on a particular machine, including deployment of the initial environment.
+
+* [LAVA Dashboard](https://launchpad.net/lava-dashboard) is a web application for storing and viewing test results. It also has simple data mining and reporting features.
+
+* [LAVA Scheduler](https://launchpad.net/lava-scheduler) is another web application for managing a collection of devices and processing a queue of test jobs to perform.
+
+There are many more smaller parts, all of which you can see on the [LAVA project page](http://launchpad.net/lava), but I will not cover them directly here. To explain how the four key pieces work I will take you back to May 2010, when this project started.
+
+Originally the Validation / QA effort started out as two small projects: launch-control and abrek. The former was responsible for the so-called "validation dashboard" - a web application that makes sense of test results - while the latter was a test execution/wrapper tool that allowed us to run an existing test, gather the result and upload it to the dashboard.
+
+Abrek would allow you to customize how a test is installed, how to invoke it and, finally, how to translate between the text output of the test program and the dashboard data format. Simple tests take a few minutes to *wrap* with this framework and we quickly got a few interesting, open source tests and benchmarks integrated.
+
+During that early period one of the most fundamental interfaces was formed, one that is still around today: the concept of *dashboard bundles*, files containing machine-readable test results. We investigated existing formats but after a lot of brainstorming and thought experiments we realized that none of them could express the kind of data that we were interested in.
We used plain text files with a hierarchical data structure encoded as JSON. The idea was, IMHO, quite interesting because, unlike in virtually all other consolidated QA or CI systems that I came across before, the actual data can be freely created, edited and moved around by conventional means and is not trapped in a complex database hidden behind proprietary APIs. A bundle can be written by any program that can output text, transmitted between systems by email, thumb drive or anything in between, uploaded to a dashboard, downloaded and re-sent somewhere else for processing.
+
+Each bundle would contain one or more "test runs", each describing the outcome of running a test somewhere. In addition to storing the typical "pass/fail/skip/etc" status we wanted to be able to represent benchmarks and performance measurements. As soon as you think of benchmarks you will find the need to store the hardware and software context that describes the environment in which the test was performed. To formalize the format and validate bundles for correctness we have used JSON Schema (http://tools.ietf.org/html/draft-zyp-json-schema-02). You can [read the actual schema](http://bazaar.launchpad.net/~linaro-validation/linaro-python-dashboard-bundle/trunk/changes) if you are interested in that level of detail. The schema is maintained alongside helper APIs in a library called [linaro-python-dashboard-bundle](https://launchpad.net/linaro-python-dashboard-bundle) (I really wish we could simply drop the "python" part now). Apart from what I already mentioned, bundles can have arbitrary text or binary attachments, key-value attributes, references to code branches and a few other interesting features. So far we have managed to map all of the tests and results we encountered in a meaningful way, but if you think that, based on your experience, something is missing, do let us know - the earlier we know the better.
+
+If you are interested in looking at some real-life bundles you can see them using the integrated bundle viewer built into the dashboard. Note that, while the bundle format is well-defined, various programs generate *different bundles*: the LAVA dispatcher uses one bundle to store the results of all the tests that were run in one go on a single system ([example](http://validation.linaro.org/lava-server/dashboard/streams/anonymous/lava-daily/bundles/bea57bc187496dda60a21432934b800712e5b920/), remember to click on the bundle viewer tab), a small script that processes measurements from various benchmarks built with a specific version of the toolchain stores source code references and uses external attachments to avoid duplicating large amounts of data ([example](http://validation.linaro.org/lava-server/dashboard/streams/anonymous/gcc/bundles/04e0bd44704435721a384fb615ef6aea42570520/)), while Abrek introspects the software and hardware context for a particular test ([example](http://validation.linaro.org/lava-server/dashboard/streams/anonymous/zyga/bundles/826e8c18b519e40db6aa51c22c65a0f2f62146da/)).
+
+This Abrek/Launch Control duo was the smallest building block that allowed us to do testing and retrieve the results later. But that's not the full story today.
+
+*Background note: a few months later Paul Larson invented the fantastic project name that we use today and we gradually transitioned from launch-control to LAVA Dashboard and from Abrek to LAVA Test.*
+
+To make testing reliable and predictable, we wanted to run each test in a clean, pristine environment.
This also allowed us to do simple Continuous Integration on the daily Linaro images that were being produced by various parts of the build system created by our infrastructure team. The details of how we actually do that are interesting but not essential here. The idea is to describe which image to deploy, which tests to install and run, and to pass this data to the [LAVA Dispatcher](https://launchpad.net/lava-dispatcher). The dispatcher would then encapsulate all the magic of automating the deployment. In theory, anyone could implement the required APIs to have dispatchers running tests on development boards, silicon simulators, QEMU, virtual machines, off-the-shelf x86 boxes, servers and laptops. All that matters is the machine-readable description of the test job.
+
+While I purposefully skip the details, this was and still is one of the most challenging parts of LAVA. Doing automated deployment and recovery is **hard**. There are lots of practical problems to solve, unexpected issues and hardware mis-design side-effects that make it very difficult to get this right. The details warrant a separate blog post which I will surely write. You may want to look at an [example Android test job](https://validation.linaro.org/scheduler/) and an [example Ubuntu test job](https://validation.linaro.org/scheduler/) straight from the history of our public scheduler instance.
+
+As long as you have one board or other device and are happy with using lava-dispatcher to describe and deploy your test environment, lava-test to execute the test program and lava-dashboard to store and visualize the results, you would not need more components. Since we were tasked with building and maintaining the Validation Lab that holds many different development boards, from all the Linaro members, we had to have another component that would allow us to manage the pool of available devices. This component is known as the LAVA Scheduler.
+
+The scheduler has an extremely simple API: you can submit a job that will be passed down to the dispatcher that is responsible for a particular board. Apart from describing the actual test, you can also specify which board or board class to use. Simple as that.
+
+As we were developing the scheduler, we quickly noticed that, as a web application, it would have to copy or somehow share various parts of the dashboard code base to get the level of integration we wanted. We have thus decided to split the dashboard into two parts: the actual dashboard-specific code and the generic webapp plumbing that allows us to reuse UI elements, RPC services, the user database and a few other things. This component is now called LAVA Server. Whenever you install any of the web-facing parts you will also get the server to host them. In retrospect, this decision was very good as it allowed us to quickly add new applications to the server and simply not have to worry about deployment complexity or integration issues.
+
+As our server-side parts offer XML-RPC APIs, we wanted to allow people to use them directly from the command line for experiments or simple scripting. We have created a set of command-line tools, called lava-*something*-tool by convention, that expose each XML-RPC method of the corresponding server-side component.
+
+Today we have two such tools: lava-dashboard-tool and lava-scheduler-tool. Since we liked the lava-server concept, we did the same thing here and wrote lava-tool to provide a consistent command-line tool framework.
+
+This covers the core parts of LAVA.
+ Remember that you can quickly install everything, apart from the dispatcher, from [our PPA](https://launchpad.net/~linaro-validation/+archive/ppa). You may also be interested in the server-side API documentation, available on the [API help page](http://validation.linaro.org/lava-server/api/help/) of our public LAVA instance.
+
+In the next installment I will take a closer look at LAVA Test and show you how to wrap a simple test program so that LAVA can use it.
diff --git a/src/content/blogs/lava-master-images.mdx b/src/content/blogs/lava-master-images.mdx
new file mode 100644
index 0000000..96cdd62
--- /dev/null
+++ b/src/content/blogs/lava-master-images.mdx
@@ -0,0 +1,25 @@
+---
+author: alan-bennett
+date: 2011-11-30T21:44:03.000Z
+link: /blog/lava-master-images/
+description: I can script the creation of the rootfs used by LAVA. It's much easier once file systems and partition alignment are out of the equation. It essentially boils down to getting an arbitrary released rootfs + hwpack and running a small script against both on your host (so it's something that LAVA can do, thanks to celery). The process has to modify a few places, namely network configuration, u-boot script, and initial ram disk.
+published: false
+title: LAVA master images
+tags: []
+related: []
+
+---
+
+There is a small partition you need to put on your SD card. The partition I've used is 64MB but it could well be 16 or less. This place is used to pull in a kernel and the initrd. Both can alternatively be provided by tftp, but since LAVA does not manage tftp just yet I wanted to avoid this step. There is a specially crafted boot script that knows where to get the root filesystem from. This means that each board has a different boot image. Sadly this is the case for the moment: while we could easily provide those options remotely, there are two important pieces that need additional configuration (the MAC address is one of them).
+
+The board gets a consistent, non-cloned, MAC address on the network card. This is a new feature as it seems all our boards get the same address if simply cloned from a single SD card (go figure!). I tested this on Panda and, at least there, there is no good place to keep the hardware address. We've had issues with duplicate UUIDs generated based on, hold it, time and MAC address. That's right, time was not set and the MAC was all the same everywhere. No more!
+
+The card can store a *LAVA identity profile* for the board. This is actually not implemented yet but will be very significant soon. This means that we can differentiate between boards directly in the boot loader. We can also look it up at runtime (when Linux has booted) if required.
+
+I can keep the rootfs on my home machine, away from the fragile SD card. The rootfs can be reverted to a snapshot on each boot if required. This is even more important as we grow, as I want LAVA to be able to generate master root filesystems for **all** supported boards automatically. The process of adding a new board can then be simplified to downloading a small SD card image (that LAVA generates) and copying it to the board. LAVA will build (or download if that is faster) a master image. No more random images, no more cryptic partition setups.
+
+We can support a network block device, which some people tell me is much better than NFS for this use case. With some additional magic we could keep a single immutable image that would fuel any number of boards.
+
+We can start experimenting with putting the boot loader remotely.
This would simplify our "master image" that we need to maintain on the card to just the *LAVA identity profile*. The boot loader, the kernel and the root filesystem would then be downloaded remotely (in stages). With some clever engineering we could actually store the identity file in some unused blocks of the card (I'm sure there is a place where nothing needs to look; the identity is just a UUID after all, and we could squeeze it into a crafted header if we really wanted). We could then use the entire SD card to test verbatim images of any kind (Android and Ubuntu alike).
+
+I can script the creation of the rootfs used by LAVA. It's much easier once file systems and partition alignment are out of the equation. It essentially boils down to getting an arbitrary released rootfs + hwpack and running a small script against both on your host (so it's something that LAVA can do, thanks to celery). The process has to modify a few places, namely network configuration, u-boot script, and initial ram disk.
diff --git a/src/content/blogs/leaders-digital-home-solutions-collaborate-linaro-arm-linux-platforms.mdx b/src/content/blogs/leaders-digital-home-solutions-collaborate-linaro-arm-linux-platforms.mdx
new file mode 100644
index 0000000..c720448
--- /dev/null
+++ b/src/content/blogs/leaders-digital-home-solutions-collaborate-linaro-arm-linux-platforms.mdx
@@ -0,0 +1,91 @@
+---
+title: "Leaders in Digital Home Solutions Collaborate with Linaro on Arm Linux
+  Platforms "
+image: linaro-website/images/blog/tech_background__under_2mb
+author: linaro
+date: 2014-05-29T14:59:15.000Z
+tags:
+  - linux-kernel
+  - open-source
+link: /news/leaders-digital-home-solutions-collaborate-linaro-arm-linux-platforms/
+description: >
+  Industry leaders Allwinner, Arm, Cisco, Comcast, Fujitsu, Hisilicon,
+  STMicroelectronics and ZTE have joined Linaro's new Segment Group focused on
+  accelerating open source development for Arm processors in digital home
+  applications
+related: []
+
+---
+
+## Industry leaders Allwinner, Arm, Cisco, Comcast, Fujitsu, Hisilicon, STMicroelectronics and ZTE have joined Linaro’s new Segment Group focused on accelerating open source development for Arm processors in digital home applications
+
+CAMBRIDGE, UK; 29 MAY 2014
+
+Linaro, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced the formation of the [Linaro Digital Home Group (LHG)](https://wiki-archive.linaro.org/LHG) with founding member companies Allwinner Technology, Arm, Cisco, Comcast, Fujitsu Semiconductor, Hisilicon Technologies, STMicroelectronics and ZTE.
+
+Building on the collaboration model used by server and networking industry leaders in the [Linaro Enterprise Group (LEG)](https://wiki-archive.linaro.org/LEG) and the [Linaro Networking Group (LNG)](https://wiki-archive.linaro.org/LNG), Linaro has brought leaders in the digital home market together in the Linaro Digital Home Group (LHG), sharing engineering effort and delivering software to relevant upstream open source projects. These leaders include SoC vendors, OEMs and operators, and they will work together in the group on digital home applications, including set-top boxes, televisions, media players, gaming and home gateway devices.
+
+Consumers can already instantly view streaming video content on a massive diversity of digital home devices capable of receiving broadcast, on-demand, and time-shifted content.
These devices function as home gateways and IP clients to access broadband and Pay TV services and are capable of managing content rights throughout the connected home. Viewers expect these devices to deliver rich 3D graphical user interfaces, access to their favorite applications, and the ability to watch and record programs all while operating on lower standby and active power. Many standards exist, but these are not implemented consistently across all platforms and devices, leading to significant fragmentation, a multitude of point solutions and subsequently significant amounts of duplicated, non-differentiating engineering effort. + +“Linaro has been collaborating with Arm, Comcast, Hisilicon and STMicroelectronics on the RDK (Reference Design Kit) for the last year,” said George Grey, CEO of Linaro. “The Linaro Digital Home Group will build on this effort and expand scope to working on different Linux-based platforms used in the Digital Home segment. We look forward to continuing our work with SoC vendors, equipment manufacturers and members of the software ecosystem in building and maintaining world-class open source foundation software for this market.” + +Members of LHG will collaborate on fundamental software platforms to enable rapid deployment of new services across a range of digital home platforms. Developing the base platform for diverse and complex multimedia applications requires a significant amount of software that addresses common challenges. LHG will deliver this as an enhanced core Linux platform for digital home devices. Linaro has been providing common core software for Arm-Powered®, Linux-based mobile devices since June 2010 with recognized success, and it has built on the collaborative working model that it has created to form special groups focusing on the particular industry segments. + +The LHG steering committee has selected the following key initiatives for the software engineering effort: + +1. A common core Linux platform. The Linaro Stable Kernel (LSK) is based on the kernel.org long-term supported (LTS) kernel. LHG will leverage this with a Group-focused baseline and add features such as DRM (digital rights management), DLNA (Digital Living Network Alliance) and CVP-2 (Commercial Video Profile 2). LHG will provide a core Linux platform build with versions to support the base layer of the RDK (Reference Design Kit), Android-based products, and manufacturer-specific Linux-based products. The LHG platform will support different vendor applications and user interfaces. +2. Development of improved media framework APIs. LHG will work to establish standardized APIs to different media hardware, codecs, accelerators, and other peripheral functions across multiple members’ SoCs to improve middleware portability +3. Development of a standard media security platform based on Arm Trustzone® technology. This will deliver an open source implementation of the W3C Encrypted Media Extensions (EME) standard for TrustZone-based Arm SoCs. +4. Integration of key open source standards-based software. The LHG steering committee will identify key open sourced standards to be integrated by the group’s engineering team. Items already under discussion include optimized HTML5 support and DLNA CVP-2. + +As with LEG and LNG, LHG will utilize output from Linaro’s core engineering group and will have a representative on the Linaro Technical Steering Committee (TSC). 
Key shared areas include the Linaro Automated Validation Architecture (LAVA) test and continuous integration (CI) farm for member SoC enablement and validation, multicore power management, virtualization and Armv8 64-bit development\*\*.\*\* + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: its goal is to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The majority of Linaro’s engineering work is open to all online. To find out more, please visit [](/). + +**Linaro Home Group (LHG) Founding Member Testimonials** + +**Allwinner Technology** + +“As a worldwide leader in application processor shipments in Android, Allwinner is excited by the collaborative opportunity to work within Linaro to bring tip-of-tree features to Arm platforms”, said Jack Lee, CMO of Allwinner.  “OTT home entertainment is a space that we’re very focused on, given the massive growth from our key markets in China, US, and Europe.  We expect LHG will enable a stronger open source software ecosystem in OTT/IPTV, and in effect enable better Arm-powered devices for consumers in our key markets”. + +*About Allwinner:* Allwinner Technology is a leading fabless design company dedicated to smart application processor SoCs and smart analog ICs. Its product line includes multi-core application processors for smart devices and smart power management ICs used by brands worldwide. + +With its focus on cutting edge UHD video processing, high performance multi-core CPU/GPU integration, and ultra-low power consumption, Allwinner Technology is a mainstream solution provider for the global tablet, internet TV, smart home device, automotive in-dash device, smart power management, and mobile connected device markets. Allwinner Technology is headquartered in Zhuhai, China. See www.allwinnertech.com for more information. Follow Allwinner on Twitter @AllwinnerTech. Media contact: service@allwinnertech.com + +**Arm** + +“The convergence of mobile and home entertainment experiences creates an opportunity for Pay-TV operators, device manufacturers and others to leverage the velocity of innovation and economies of scale of the mobile ecosystem”, said Charlene Marini, vice president of marketing, embedded segments, Arm. “Linaro Home Group accelerates this reality by providing a software platform fully optimized for Arm technologies, including Arm TrustZone, to enable exciting premium content based services on the next generation connected home platforms.” + +*About Arm:* [Arm](http://www.arm.com/) is at the heart of the world’s most advanced digital products. Our technology enables the creation of new markets and transformation of industries and society. We design scalable, energy efficient-processors and related technologies to deliver the intelligence in applications ranging from sensors to servers, including smartphones, tablets, enterprise infrastructure and the Internet of Things. 
+ +Our innovative technology is licensed by Arm Partners who have shipped more than 50 billion Systems on Chip (SoCs) containing our intellectual property since the company began in 1990. Together with our Connected Community, we are breaking down barriers to innovation for developers, designers and engineers, ensuring a fast, reliable route to market for leading electronics companies.  Learn more and join the conversation at [http://community.arm.com](http://community.arm.com/). + +**Cisco** + +“Delivering new video services at Internet speed through the Cloud is a reality, but we need to reduce operational complexity and fragmentation in core engineering work,” said Dr Ken Morse, CTO, Connected Devices, Cisco. “As a founding member of the Linaro Digital Home Group, we are utilizing Linaro’s shared engineering model to reduce our cost of enablement for the Arm platform, minimize complexity and help accelerate time to market for new Cisco connected home products and services.” + +*About Cisco:* Cisco is the worldwide leader in IT that helps companies seize the opportunities of tomorrow by proving that amazing things can happen when you connect the previously unconnected. For ongoing news, please go to [http://thenetwork.cisco.com](https://thenetwork.cisco.com/c/r/newsroom/en/us/index.html). + +**Comcast** + +“The close collaboration between Arm and Linaro is a critical component for the Arm eco-system,” said Sree Kotay, Chief Software Architect for Comcast. “We are looking forward to the Linaro Digital Home Group enabling the Arm platform for our Comcast devices.  This is an important step to Arm extending beyond closed cell phone operating systems.” + +*About Comcast Cable:* Comcast Cable is the nation's largest video, high-speed Internet and phone provider to residential customers under the XFINITY brand and also provides these services to businesses. Comcast has invested in technology to build an advanced network that delivers among the fastest broadband speeds, and brings customers personalized video, communications and home management offerings. Comcast Corporation (Nasdaq: CMCSA, CMCSK) is a global media and technology company. Visit www.comcastcorporation.com for more information. + +**STMicroelectronics** + +“Linaro is a proven place to boost innovation around Linux in different market segments. As a founding member company of the Linaro Digital Home Group, STMicroelectronics is collaborating with Linaro to extend this success to the Digital Home segment including the Gateway and client markets,” said Christophe Lorieau, Director System, Software & Customer Support. “ST is working with Linaro on fundamental elements of a Linux software platform leveraging our continuing efforts with Linaro core groups including security through our open-source TEE (Trusted Execution Environment)." + +*About STMicroelectronics:* ST is a global leader in the semiconductor market serving customers across the spectrum of sense and power and automotive products and embedded processing solutions. From energy management and savings to trust and data security, from healthcare and wellness to smart consumer devices, in the home, car and office, at work and at play, ST is found everywhere microelectronics make a positive and innovative contribution to people’s life. By getting more from technology to get more from life, ST stands for life.augmented. [www.st.com](https://www.st.com/content/st_com/en.html) + +**ZTE** + +“We are honored to be one of the founding members of the Linaro Digital Home Group. 
ZTE has achieved excellent marketing and technical performance benefits by offering home-multimedia entertainment, home-network storage and smart home solutions,” said Jill Guo, Strategic Planning Director, Ecosytem Partnership, ZTE Planning & Design Center. “In joining this new group, ZTE has strong confidence to provide products and solutions with better energy conservation and environmental protection, and an improved and richer user experience!” + +*About ZTE:* ZTE is ranked in the top three vendors globally both for home gateways and set-top box products, and has very actively participated in the construction and development of China smart city cases. As the largest publicly-listed provider of telecommunications equipment in China, and a globally-leading manufacturer of smartphones, ZTE is a member of more than 70 global industry organizations and forums. ZTE has filed applications for more than 50,000 international patents, with over 14,000 already granted, and was ranked number one globally in international patent applications in 2011 and 2012 by the World Intellectual Property Organization. + +To keep the company at the forefront of global technology development, ZTE is strengthening research and development of key technologies such as mobile communications, smart devices, optical networking, cloud computing and big data, as well as next-generation technologies such as 5G mobile communications. ZTE is committed to working with partners in the technology industry on an open and mutually beneficial basis, in order to promote the healthy and sustainable development of the industry. diff --git a/src/content/blogs/leading-china-smartphone-innovator-meizu-becomes-first-oem-to-join-linaro-mobile-group.mdx b/src/content/blogs/leading-china-smartphone-innovator-meizu-becomes-first-oem-to-join-linaro-mobile-group.mdx new file mode 100644 index 0000000..07e0a20 --- /dev/null +++ b/src/content/blogs/leading-china-smartphone-innovator-meizu-becomes-first-oem-to-join-linaro-mobile-group.mdx @@ -0,0 +1,35 @@ +--- +excerpt: Meizu becomes first OEM to join Linaro Mobile Group. As a current and + potential customer of several of Linaro’s silicon supplier members, Meizu will + take on a unique role in helping define the engineering activities of LMG and + our contributions to open source projects in the mobile space. +author: linaro +description: Meizu, a leading Chinese smartphone innovator, becomes first OEM to + join the Linaro Mobile Group. Read more here. +date: 2015-08-03T11:26:09.000Z +comments: false +title: Meizu becomes first OEM to join Linaro Mobile Group +tags: [] +link: /news/leading-china-smartphone-innovator-meizu-becomes-first-oem-to-join-linaro-mobile-group/ +image: linaro-website/images/blog/37319206961_0b863ab87d_k +related: [] + +--- + +Cambridge, UK; 3 August 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that Meizu Technology Company Ltd has become the first OEM to join the Linaro Mobile Group (LMG). + +LMG was formed in July 2014 to consolidate and optimize open source software for Arm powered mobile phones, tablets, laptops and wearables. The Group's engineers work on the Android Open Source Project (AOSP), Performance and Power optimizations, Graphics and GPGPU, and work closely with other groups in Linaro Core Engineering on other open source technologies. 
Meizu's membership of LMG is significant because it can directly represent the needs of today's mobile phone manufacturer in a group that includes silicon and software vendors. + +“We are delighted to welcome Meizu as the first pure OEM handset maker to become a member of the Linaro Mobile Group”, said Joe Bates, EVP of Linaro Member Services. “As a current and potential customer of several of Linaro’s silicon supplier members, Meizu will take on a unique role in helping define the engineering activities of LMG and our contributions to open source projects in the mobile space.” + +“Meizu designs and produces smartphones that provide a simple, intuitive mobile experience” said Liang Dongming, CTO of Meizu. “Open source software is critical to enabling Meizu to provide this experience built on a proven technology foundation and we are committed to working with Linaro and its members worldwide to accelerate open source innovation on the Arm platform.” + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company is a collaborative engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [ ]()and [http://www.96Boards.org](https://www.96boards.org/). + +**About Meizu** +Meizu, one of the top ten smartphone brands in China, is a trailblazer in the innovation and design of smartphones, and always presents graceful and user-friendly devices. Established in 2003 and headquartered in Zhuhai, China, Meizu expanded into the smartphone market in 2008 and has been committed to developing high-end smartphones ever since. Based on the business philosophy and commitment to pursuing perfection, Meizu remains focused on developing innovative and user-friendly smartphones which are characterized by state-of-the-art craftsmanship, powerful specifications and an attractive price. With more than 2000 retail stores, Meizu has built a global presence in Hong Kong, India, EU, Israel, Russia and Ukraine. For more information, please visit [www.meizu.com/en](http://www.meizu.com/en). 
diff --git a/src/content/blogs/lg-electronics-joins-linaro.mdx b/src/content/blogs/lg-electronics-joins-linaro.mdx
new file mode 100644
index 0000000..61cce2b
--- /dev/null
+++ b/src/content/blogs/lg-electronics-joins-linaro.mdx
@@ -0,0 +1,50 @@
+---
+author: linaro
+date: 2012-10-26T11:19:55.000Z
+description: SEOUL, KOREA AND CAMBRIDGE, UK - 26 OCT 2012
+link: /news/lg-electronics-joins-linaro/
+title: LG Electronics Joins Linaro
+tags: []
+related: []
+
+---
+
+SEOUL, KOREA AND CAMBRIDGE, UK - 26 OCT 2012
+
+Consumer electronics leader LG Electronics and Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, today announced that LG will join Linaro to cooperate on new Arm technologies.
+
+LG will contribute resources to work together with the resources from existing Linaro members. This shared team of over 100 engineers is directed by a Technical Steering Committee (TSC), which now includes LG. Through the TSC, Linaro members are defining the future of Linux on Arm. Accelerated time to market for new Arm technologies is achieved by working together within Linaro on shared solutions, while focusing members' in-house engineering resources on differentiation.
+
+Linaro uses a unique business model where multiple companies create core open source software once with a shared investment in a single software engineering team, rather than by creating multiple, fragmented software solutions in isolation. Membership delivers an immediate return on investment as new members get immediate access to a significant engineering team.
+
+“We are pleased to join Linaro as its newest member,” said Bo-ik Sohn, Senior Vice President for LG Electronics R\&D Lab. “We will take an active role in the organization and actively cooperate on new Arm technologies with other Linaro members.”
+
+"We are very pleased to welcome LG Electronics as a Linaro member," said George Grey, Linaro CEO. "With Linaro continuing to enable shared member investment on consolidation and optimization of Linux and Android on Arm, as well as working on new Arm technologies such as big.LITTLE and next-generation 64-bit devices, we are excited to have LG Electronics working closely with us."
+
+**About LG Electronics, Inc.**
+
+LG Electronics, Inc. (KSE: 066570.KS) is a global leader and technology innovator in consumer electronics, mobile communications and home appliances. With 117 operations around the world, LG achieved global sales of USD 49 billion (KRW 54.26 trillion) in 2011. LG comprises four business units – Home Entertainment, Mobile Communications, Home Appliance, and Air Conditioning & Energy Solution – and is one of the world’s leading producers of flat panel TVs, mobile devices, air conditioners, washing machines and refrigerators. LG Electronics is a 2012 ENERGY STAR Partner of the Year. For more news and information on LG Electronics, please visit http://www.LGnewsroom.com.
+
+**About Linaro**
+
+Linaro is the place where engineers from the world's leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 120 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation.
+ +To ensure commercial quality software, Linaro's work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro's engineering work is open to all online. To find out more, please visit . + +**Media Contacts:** + +*LG Electronics, Inc.* + +**Ken Hong** + +LGnews@lge.com + +www.LGnewsroom.com + +***Linaro Ltd*** + +Steve Taylor + +media@linaro.org + +www.linaro.org/news/pr diff --git a/src/content/blogs/linaro-1-contributor-linux-kernel-4-9-release.mdx b/src/content/blogs/linaro-1-contributor-linux-kernel-4-9-release.mdx new file mode 100644 index 0000000..de7476a --- /dev/null +++ b/src/content/blogs/linaro-1-contributor-linux-kernel-4-9-release.mdx @@ -0,0 +1,19 @@ +--- +author: linaro +date: 2016-12-21T12:37:28.000Z +link: /blog/linaro-1-contributor-linux-kernel-4-9-release/ +title: "Linaro #1 contributor to the Linux kernel 4.9 release" +description: Linaro has been a long-standing top 5 company contributor to Linux kernel development. With the release of Linux kernel 4.9, Linaro has for the first time made the top position, measured by number of changesets. +tags: [] +related: [] + +--- + +Linaro has been a long-standing top 5 company contributor to Linux kernel development. +With the [release](https://www.linux.com/news/linux-kernel-49-here-and-its-largest-release-ever) of Linux kernel 4.9, Linaro has for the first time made the top position, measured by number of changesets. Linaro was the [most active employer](https://lwn.net/Articles/708266/) with 1,876 changesets, due mainly to the integration of Greybus into Linux. Greybus was developed as part of Google ATAP’s Project Ara modular phone effort. The top three contributors for this release -- John Hovold, Viresh Kumar and Alex Elder -- worked in Linaro on the project with Greg Kroah-Hartman. + +Greybus is a framework that allows the main processor on a portable device (i.e., a phone or tablet) to communicate with removable modules. It allows protocols to be defined that use a common remote procedure call mechanism to communicate with and control functionality on a module. Modules may be added to or removed from a running system, and Greybus defines how new modules are recognized and configured for use, and allows them to be gracefully removed from the system at any time. + +Modules are envisioned to provide virtually unlimited capabilities--speakers, cameras, flash storage, displays, automobile remotes, and other functions not yet imagined. The Greybus architecture provides a way for additional features to be added to a phone long after it has been purchased (or even designed). Greybus is built as an application layer on the MIPI UniPro stack, but its basic constructs are generic enough that it could be layered on other transports as well. + +Linaro CEO George Grey said “Linux is a truly collaborative project. While we are proud to have achieved the top contributor position for the first time, working in the kernel and other open source projects is a key part of our mission, and we are very pleased to be contributing at any level. 
We are excited that, despite the closing of Project Ara, this work has been merged upstream - we believe that it will be used as the model for future modular products based on Linux, and we look forward to seeing products utilizing this code for new solutions in the future.”
diff --git a/src/content/blogs/linaro-14-04-release-now-available-download.mdx b/src/content/blogs/linaro-14-04-release-now-available-download.mdx
new file mode 100644
index 0000000..bcd7587
--- /dev/null
+++ b/src/content/blogs/linaro-14-04-release-now-available-download.mdx
@@ -0,0 +1,74 @@
+---
+title: Linaro 14.04 Release Now Available for Download!
+description: In this article, Jennifer Castelino provides an overview of what
+  has been accomplished on Linaro's latest 14.04 release, which is available to
+  download today!
+image: linaro-website/images/blog/Banner_Linux_Kernel
+tags:
+  - android
+  - toolchain
+author: jennifer-castelino
+date: 2014-04-24T18:06:07.000Z
+link: /blog/linaro-14-04-release-now-available-download/
+related: []
+
+---
+
+> "The world is full of magical things patiently waiting for our wits to grow sharper." *\~ Bertrand Russell*
+
+The Linaro 14.04 release is now available for download. See the detailed highlights of this release to get an overview of what has been accomplished by the Working Groups, Landing Teams and Platform Teams. The release details are linked from the Details column for each released artifact on the release information:
+
+* [https://wiki-archive.linaro.org/Cycles/1404/Release#Release\_Information](https://wiki-archive.linaro.org/Cycles/1404/Release#Release_Information)
+
+We encourage everybody to use the 14.04 release.
+
+This post includes links to more information and instructions for using the images. The download links for all images and components are available on our downloads page:
+
+* [/downloads/](/downloads/)
+
+**USING THE ANDROID-BASED IMAGES**
+
+The Android-based images come in three parts: system, userdata and boot. These need to be combined to form a complete Android install. For an explanation of how to do this, please see:
+
+* [http://wiki-archive.linaro.org/Platform/Android/ImageInstallation](http://wiki-archive.linaro.org/Platform/Android/ImageInstallation)
+
+If you are interested in getting the source and building these images yourself, please see the following pages:
+
+* [http://wiki-archive.linaro.org/Platform/Android/GetSource](http://wiki-archive.linaro.org/Platform/Android/GetSource)
+* [http://wiki-archive.linaro.org/Platform/Android/BuildSource](http://wiki-archive.linaro.org/Platform/Android/BuildSource)
+
+**USING THE UBUNTU-BASED IMAGES**
+
+The Ubuntu-based images consist of two parts. The first part is a hardware pack, which can be found under the hwpacks directory and contains hardware-specific packages (such as the kernel and bootloader). The second part is the rootfs, which is combined with the hardware pack to create a complete image. For more information on how to create an image, please see:
+
+* [http://wiki-archive.linaro.org/Platform/DevPlatform/Ubuntu/ImageInstallation](http://wiki-archive.linaro.org/Platform/DevPlatform/Ubuntu/ImageInstallation)
+
+**USING THE OPEN EMBEDDED-BASED IMAGES**
+
+With the Linaro-provided downloads and with Arm’s Fast Models virtual platform, you may boot a virtual Armv8 system and run 64-bit binaries.
+ +**GETTING INVOLVED** + +More information on Linaro can be found on our websites: + +* Homepage: [](/) +* Wiki: [http://wiki-archive.linaro.org](http://wiki-archive.linaro.org/) + +Also subscribe to the important Linaro mailing lists to stay on top of Linaro developments: + +* Announcements: [https://lists.linaro.org/mailman3/lists/linaro-announce.lists.linaro.org/](https://lists.linaro.org/mailman3/lists/linaro-announce.lists.linaro.org/) +* Development: [https://lists.linaro.org/mailman3/lists/linaro-dev.lists.linaro.org/](https://lists.linaro.org/mailman3/lists/linaro-dev.lists.linaro.org/) + +**KNOWN ISSUES WITH THIS RELEASE** + +For any errata issues, please see: + +* [http://wiki-archive.linaro.org/Cycles/1404/Release#Known\_Issues](https://wiki-archive.linaro.org/Cycles/1404/Release) + +Bug reports for this release should be filed in Launchpad against the individual packages that are affected. If a suitable package cannot be identified, feel free to assign them to: + +* [http://www.launchpad.net/linaro](http://www.launchpad.net/linaro) + +**UPCOMING LINARO CONNECT EVENTS: LINARO CONNECT USA 2014** + +Registration for Linaro Connect USA 2014 (LCU14), which will be in Burlingame, California from September 15 - 19, 2014 is now open. diff --git a/src/content/blogs/linaro-16-04-release-available-for-download-2.mdx b/src/content/blogs/linaro-16-04-release-available-for-download-2.mdx new file mode 100644 index 0000000..299aa6b --- /dev/null +++ b/src/content/blogs/linaro-16-04-release-available-for-download-2.mdx @@ -0,0 +1,70 @@ +--- +excerpt: Linaro's 16.04 release is now available for download. See the detailed + highlights of this release and an overview of what has been accomplished by + each team along with all available software downloads. +keywords: Linaro +description: Linaro's 16.04 release is now available for download. See detailed + overview & highlights of what has been accomplished in this release, including + current downloads. +image: linaro-website/images/blog/Banner_Core_Technologies +tags: + - android + - linux-kernel + - open-source +author: linaro +title: Linaro 16.04 Release Available for Download +date: 2016-04-29T14:51:39.000Z +link: /blog/linaro-16-04-release-available-for-download-2/ +related: [] + +--- + +> “Digital circuits are made from analog parts." ***\~ Don Vonada*** + +Linaro 16.04 release is now available for download. See the detailed highlights of this release to get an overview of what has been accomplished by the Working Groups, Landing Teams and Platform Teams. We encourage everybody to use the 16.04 release. To sign-up for the release mailing list go here: [https://lists.linaro.org/mailman3/lists/linaro-release.lists.linaro.org/](https://lists.linaro.org/mailman3/lists/linaro-release.lists.linaro.org/) + +Both LSK and LNG tarball releases have been discontinued this cycle and the preferred way of procuring a release is through [git.linaro.org](http://git.linaro.org/). + +This post includes links to more information and instructions for using the images. The download links for all images and components are available on our downloads page: + +* [/downloads/](/downloads/) + +**USING THE ANDROID-BASED IMAGES** + +The Android-based images come in three parts: system, userdata and boot. These need to be combined to form a complete Android install. 
For an explanation of how to do this please see: + +* [http://wiki-archive.linaro.org/Platform/Android/ImageInstallation](http://wiki-archive.linaro.org/Platform/Android/ImageInstallation) + +If you are interested in getting the source and building these images yourself please see the following pages: + +* [http://wiki-archive.linaro.org/Platform/Android/GetSource](http://wiki-archive.linaro.org/Platform/Android/GetSource) + +* [http://wiki-archive.linaro.org/Platform/Android/BuildSource](http://wiki-archive.linaro.org/Platform/Android/BuildSource) + +**USING THE UBUNTU-BASED IMAGES** + +The Ubuntu-based images consist of two parts. The first part is a hardware pack, which can be found under the hwpacks directory and contains hardware specific packages (such as the kernel and bootloader). The second part is the rootfs, which is combined with the hardware pack to create a complete image. For more information on how to create an image please see: + +* [http://wiki-archive.linaro.org/Platform/DevPlatform/Ubuntu/ImageInstallation](http://wiki-archive.linaro.org/Platform/DevPlatform/Ubuntu/ImageInstallation) + +**USING THE OPEN EMBEDDED-BASED IMAGES** + +With the Linaro provided downloads and with Arm’s Fast Models virtual platform, you may boot a virtual Armv8 system and run 64-bit binaries. + +**GETTING INVOLVED** + +More information on Linaro can be found on our websites: + +* Homepage: [](/) + +* Wiki: [http://wiki-archive.linaro.org](http://wiki-archive.linaro.org/) + +Also subscribe to the important Linaro mailing lists to stay on top of Linaro developments: + +* Announcements: [https://lists.linaro.org/mailman3/lists/linaro-announce.lists.linaro.org/](https://lists.linaro.org/mailman3/lists/linaro-announce.lists.linaro.org/) + +* Development: [https://lists.linaro.org/mailman3/lists/linaro-dev.lists.linaro.org/](https://lists.linaro.org/mailman3/lists/linaro-dev.lists.linaro.org/) + +**KNOWN ISSUES WITH THIS RELEASE** + +* Bug reports for this release should be filed in Bugzilla ([http://bugs.linaro.org](http://bugs.linaro.org/)) against the individual packages or projects that are affected. diff --git a/src/content/blogs/linaro-and-distributions.mdx b/src/content/blogs/linaro-and-distributions.mdx new file mode 100644 index 0000000..dcb75ca --- /dev/null +++ b/src/content/blogs/linaro-and-distributions.mdx @@ -0,0 +1,30 @@ +--- +author: david-rusling +date: 2010-10-26T18:50:00.000Z +link: /blog/community-blog/linaro-and-distributions/ +title: Linaro and Distributions +description: Linaro works in upstream open source projects, ensuring that the Arm architecture and embedded platforms are well supported. +tags: + - arm + - linux-kernel +related: [] + +--- + +Linaro works in upstream open source projects, ensuring that the Arm architecture and embedded platforms are well supported. Along the way, the Arm community is learning how to be more open and collaborative, both within the open source community and between ourselves. It's a very interesting process, worthy of a future blog entry: watching Arm and its partners adapt themselves to align with an open source based platform world. + +The difficulty with working upstream is the lag between upstream work, upstream stable releases and inclusion in distributions and, therefore, products. This is particularly critical for code bases with long development cycles. GCC, for example, makes one major release per year, in spring, with several bug fixing updates.
That is, new features come out once a year and any code donated before the end of 2010 will come out in the 4.6.0 branch made around April 2011. Worse still, the 4.6 branch will not be used by distributions until it is considered to be stable. This will be 4.6.1 and will not be used by distributions until Autumn 2011. In other words, any code donated into GCC before the end of 2010 will not be used by a distribution until Autumn 2011. Of course, other code bases have different release cadences. The Linux kernel is creating new point releases roughly every couple of months. + +When Linaro started, I thought that distributions would want the six monthly 'baseline' release. That is, a set of built and co-tested binaries produced at the end of each 6 monthly development cycle. This turns out (mostly) not to be true. What they are much more interested in are Linaro's consolidation trees. These are code trees based on a project's current stable release but with additional features that have been fed upstream. An example would be GCC 4.5 plus upstream patches giving better Armv7A Thumb-2 code performance. The baseline releases are still important; they are used to underpin the development in the next cycle and they are starting to be used by Linaro's members as they develop new platforms. + +One danger of consolidation trees, though, is that they might hold patches that will never go upstream. This is why the 'rule' in Linaro is that a patch must be accepted upstream before it can go in a consolidation tree, although quite what 'accepted' means varies by code base and engineering practice. It is also worth noting that, in the process of being merged upstream, patches may get reworked. This creates some engineering drag, in that consolidation trees need engineering effort in order to maintain and test them. This stability and support is important, as the distributions are, effectively, treating Linaro consolidation trees as they would upstream open source projects. + +One trend I have noticed over the last 12 months is that Linux distributions, particularly those from an embedded heritage, are trying to move to the latest stable version of upstream projects. In particular, the core toolchains and the Linux kernel. This aligns well with Linaro's approach, and also makes consolidation trees easier to provide. The amount of engineering effort needed to maintain a consolidation tree three releases behind the leading edge of an open source project is much greater than maintaining one that is only one release behind. As an example, maintaining a 2.6.35 consolidation tree is relatively light engineering effort, whereas maintaining a 2.6.32 based consolidation tree would be much harder. + +Why do distributions not want to take binaries? I think that this is about timing and build systems. Timing-wise, it is much easier to align with a monthly release than with a six monthly release. Distributions started taking Linaro's toolchain output in the middle of this cycle (and will take the next release in the middle of the next cycle). Waiting until the end of this cycle would have delayed adoption by 3 to 6 months. + +There are many build systems being used in Linux today, to name but a few: the Linux Foundation supports Open Build System (OBS), OpenEmbedded, Gentoo, Red Hat packaging system (RPM), Debian and Ubuntu's Launchpad. Whilst they are all different, they have similar aims. They allow upstream stable releases to be imported into the build system and modified by patch sets.
This allows distributions to track the upstream projects, whilst maintaining a high degree of control over bug fixes and features. They often incorporate bug and work tracking systems as well as validation features. Each distribution is, therefore, deeply wedded to its build system, relying upon it to make on-time, feature-rich product releases. + +Even if all of the world's Linux distributions used the same build system, there would still be the complexity of choice, as each distribution chooses the technologies on which to base their release, which versions of the upstream projects and the build options used to build the release. All of this works against distributions taking binary outputs from Linaro and leads them, instead, to treat Linaro as an upstream project, taking Linaro's consolidation trees where this enhances their products. + +In summary, the interaction between Linaro and the various distributions happens via upstream open source projects and the consolidation trees that it creates and maintains. diff --git a/src/content/blogs/linaro-and-microsoft-collaborate-on-secure-media-solutions-for-arm-based-socs.mdx b/src/content/blogs/linaro-and-microsoft-collaborate-on-secure-media-solutions-for-arm-based-socs.mdx new file mode 100644 index 0000000..7ac7966 --- /dev/null +++ b/src/content/blogs/linaro-and-microsoft-collaborate-on-secure-media-solutions-for-arm-based-socs.mdx @@ -0,0 +1,54 @@ +--- +author: linaro +date: 2015-09-09T12:01:21.000Z +description: Linaro announced a major step forward in the delivery of an open + source secure media solution for the consumption of premium content on Arm + Powered devices. The Linaro Digital Home Group (LHG), with support from the + Microsoft PlayReady team and the OpenCDM project, has successfully integrated + several security features required by premium content service providers with + the Microsoft® PlayReady® Digital Rights Management (DRM). +excerpt: Linaro announced a major step forward in the delivery of an open source + secure media solution for the consumption of premium content on Arm Powered + devices. The Linaro Digital Home Group (LHG), with support from the Microsoft + PlayReady team and the OpenCDM project, has successfully integrated several + security features required by premium content service providers with the + Microsoft® PlayReady® Digital Rights Management (DRM). +link: /news/linaro-and-microsoft-collaborate-on-secure-media-solutions-for-arm-based-socs/ +tags: + - arm + - linux-kernel + - open-source +title: Linaro and Microsoft collaborate on secure media solutions for Arm-based SoCs +related: [] + +--- + +Cambridge, UK; 9 September 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced a major step forward in the delivery of an open source secure media solution for the consumption of premium content on Arm Powered devices. The Linaro Digital Home Group (LHG), with support from the Microsoft PlayReady team and the OpenCDM project, has successfully integrated several security features required by premium content service providers with the Microsoft® PlayReady® Digital Rights Management (DRM). + +This new solution enables application developers, silicon partners, OEMs, operators, and content owners to use open source technology to build feature-rich, secure products for the Pay TV market.
By bringing together all of the essential secure hardware and software elements into an open source design, OEMs can reduce their time-to-market and open up new opportunities for service providers to deliver premium content across more consumer devices built on Arm-based SoCs. + +The essential security features include the World Wide Web Consortium’s (W3C) Encrypted Media Extensions (EME), which enables premium content service providers to write their electronic programming guide applications using standard HTML5 once and run it on many devices. Examples of W3C EME compliant devices include set top boxes, SmartTVs, smartphones, tablets, PCs and game devices. The major development in this solution is the integration of Microsoft’s PlayReady DRM with W3C EME, OpenCDM, Chromium and Linaro’s Open Portable Trusted Execution Environment (OP-TEE) on Arm TrustZone® technology. + +The secure media solution has been implemented on an STMicroelectronics STiH410 SoC with an Arm Cortex®-A9 processor at its core. The new solution integrates the following key components: + +* W3C EME (latest version published on 31 March 2015[ http://www.w3.org/TR/encrypted-media/](http://www.w3.org/TR/encrypted-media/)) +* [Microsoft PlayReady](http://www.microsoft.com/playready/) DRM Porting Kit v3.0 +* OP-TEE ([https://github.com/OP-TEE](https://github.com/OP-TEE)) +* OpenCDM ([https://github.com/fraunhoferfokus/open-content-decryption-module](https://github.com/fraunhoferfokus/open-content-decryption-module)) +* Chromium v43 + +“The Linaro Digital Home Group is extremely pleased to deliver this open source secure media solution to the embedded developer community” said Mark Gregotski, Director of the Linaro Digital Home Group. “This collaboration demonstrates how a commercial DRM, such as Microsoft’s PlayReady, can be integrated into a security framework comprised of open-source components, including the Linaro Open Portable TEE running on Arm TrustZone. We hope this will be the catalyst to accelerate the deployment of secure DRM solutions employing open source software.” + +“This is a key milestone that showcases how Microsoft PlayReady DRM works cross-platform in a standard way. We are excited about the collaboration with Linaro, Arm, OP-TEE and OpenCDM. This reference implementation simplifies and accelerates the ability of partners to build rich experiences to deliver secure media solutions, while providing market leading content protection using Microsoft PlayReady” said Dave Bossio, Group Program Manager, Windows Devices Group, Security at Microsoft Corporation. + +“Trust is key to future media business models, as valuable content must be protected from server to screen,” said Shiv Ramamurthi, Director, Home Segment Marketing, Arm. “The pay TV ecosystem will see immediate content security benefits from the integration of Arm TrustZone and Microsoft PlayReady DRM technology. This latest open source initiative led by the Linaro Home Group is a milestone in the enablement of next-generation secure content and media experiences for consumers.” + +“ST has been a strong contributor to the Open Portable Trusted Execution Environment (OP-TEE) in open source, a key enabler for this integration. As a natural step forward, ST is pleased its STiH410 platform is being used as a vehicle for this effort and for an exciting demo at IBC 2015,” said Yingchih Yang, Advanced System and Security Officer of the Consumer Product Division in STMicroelectronics. 
“Such Linaro contributions will facilitate premium content consumption across various devices including smartphones, tablets, and set-top-boxes, meeting strong market expectations.” + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company is a collaborative engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. For more information about Linaro, visit [](/). diff --git a/src/content/blogs/linaro-announces-96boards-initiative-accelerate-arm-software-development.mdx b/src/content/blogs/linaro-announces-96boards-initiative-accelerate-arm-software-development.mdx new file mode 100644 index 0000000..9f6a341 --- /dev/null +++ b/src/content/blogs/linaro-announces-96boards-initiative-accelerate-arm-software-development.mdx @@ -0,0 +1,50 @@ +--- +title: Linaro announces 96Boards initiative to accelerate Arm software development +description: 96Boards is an open hardware specification for Arm 32-bit and + 64-bit developer boards, and a Community Program for software delivery to + developers, makers and OEMs +image: linaro-website/images/blog/Code_Image_Core_tech +tags: + - linaro-connect + - linux-kernel + - open-source +author: linaro +date: 2015-02-09T01:55:19.000Z +link: /news/linaro-announces-96boards-initiative-accelerate-arm-software-development/ +related: [] + +--- + +## 96Boards is an open hardware specification for Arm 32-bit and 64-bit developer boards, and a Community Program for software delivery to developers, makers and OEMs + +HONG KONG, China; 9  FEBRUARY 2015 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced the launch of the 96Boards initiative. + +96Boards is the first open hardware specification that provides a platform for the delivery of compatible low cost, small footprint 32-bit and 64-bit Cortex-A boards from the full range of Arm SoC vendors. Standardized expansion buses for peripheral I/O, display and cameras allow the hardware ecosystem to develop a range of compatible add-on products that will work on any 96Boards product over the lifetime of the platform. + +In addition, the 96Boards website at [www.96Boards.org](https://www.96boards.org/) provides software downloads and updates, information on products compatible with 96Boards and a forum for software developers, makers and OEMs. + +The 96Boards initiative is designed to offer a single software and hardware community across multiple vendor boards supporting a range of different features. A fixed set of minimum functions including USB, SD, HDMI and standardized low speed and high speed peripheral connectors are provided. 
Vendors may add customized hardware and feature sets provided the minimum functions are available. We expect this to extend the platform life, increase the market for add-on hardware, and accelerate open source upstreaming of support for new SoC features. + +Specifications for low-cost Armv7-A and Armv8-A development boards target the mobile, embedded, digital home, networking and server segments. The first of these standards - the Consumer Edition - is available now from [www.96Boards.org](https://www.96boards.org/). The second - the Enterprise Edition - will be available in Q2 2015.  These specifications are intended to foster the delivery of multiple Arm hardware platforms targeted at software developers, the maker community, higher education, and embedded OEMs. + +Prior to the launch of 96Boards compatible boards, Linaro will work with the board manufacturers and chip suppliers to bring up core software and ensure stability of the platform, including continuous integration testing in Linaro’s Automated Validation Architecture (LAVA). The core software will include builds of Debian and Android running on a recent mainline kernel. Additional software may be made available, depending on the target market for the individual boards. + +**Supporting Quotes** + +“Actions Technology plans to combine its low-cost design and manufacturing experience with Linaro’s software expertise to produce the most cost-effective development platform for the full spectrum of software developers, from professional developers in large corporations to individual hobbyists and makers,” said Dr. Zhenyu Zhou, President of Actions Technology. “We look forward to helping build the 96Boards community and bring the benefits of Actions Technology’s products to developers around the world.” + +“The work we’re doing with Linaro is key for driving collaborative software engineering on the Arm architecture,” said Suresh Gopalakrishnan, general manager and corporate vice president, Server Business Unit at AMD. “We look forward to the 96Boards initiative not only as an effort to extend ecosystem partner collaboration, but also to enable the developer community with a cost-effective platform for Arm server software development.” + +“Linaro’s initiative is perfectly timed to strengthen the 64-bit developer community,” said Vincent Korstanje, vice president of marketing, systems and software group, Arm. “A diverse range of Armv8-A based devices will appear in consumer, embedded and networking infrastructure markets this year. This program will fuel the pace of 64-bit innovation by providing developers access to low-cost development boards.” + +“We are incredibly excited to be a part of the Linaro effort,” said Glenn Carlson, Corporate Supplier Manager at Arrow Electronics.  “As a solutions oriented partner, we are continually seeking new and exciting ways to help our customers solve their most challenging problems.  Our Arm expertise and supply chain experience coupled with Linaro enablement will allow us to explore technologies we never thought possible.” + +“The 96Boards program takes on one of the greatest challenges facing designers looking to leverage Arm-based hardware platforms – software complexity and compatibility,” said Tim Barber, senior vice president, design chain business development, Avnet Electronics Marketing, an operating group of Avnet, Inc. (NYSE: AVT). 
“This program, and its supporting development community, will expand the array of cost-effective, Arm-based platforms available to engineers and will accelerate time-to-market with next-generation equipment and devices.” + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of open source on Arm. The company is a not-for-profit engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. For more information about Linaro, visit [](). diff --git a/src/content/blogs/linaro-announces-actions-technology-founding-member-linaro-community-boards-group.mdx b/src/content/blogs/linaro-announces-actions-technology-founding-member-linaro-community-boards-group.mdx new file mode 100644 index 0000000..5226349 --- /dev/null +++ b/src/content/blogs/linaro-announces-actions-technology-founding-member-linaro-community-boards-group.mdx @@ -0,0 +1,44 @@ +--- +author: linaro +date: 2015-02-09T01:55:53.000Z +description: Actions Technology invests in enabling the open source development + community with the development of a new cost-effective Arm 64-bit development + board +link: /news/linaro-announces-actions-technology-founding-member-linaro-community-boards-group/ +tags: + - linaro-connect + - linux-kernel + - open-source +title: Linaro announces Actions Technology as a founding member of the Linaro + Community Boards Group +related: [] + +--- + +## Actions Technology invests in enabling the open source development community with the development of a new cost-effective Arm 64-bit development board + +HONG KONG, China; 9  FEBRUARY 2015 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm® architecture, today at Linaro Connect Hong Kong 2015 announced that leading Chinese fabless semiconductor company Actions Technology (Zhuhai), a wholly owned subsidiary of Actions Semiconductor Co., Ltd (NASDAQ: ACTS),  has joined Linaro as a founding member of the new Linaro Community Boards Group (LCG). + +The LCG has been formed to support the new 96Boards initiative. 96Boards is the first open hardware specification that provides a platform for the delivery of compatible low-cost, small footprint 32-bit and 64-bit Cortex-A boards. Standardized expansion buses for peripheral I/O, display and cameras allow the hardware ecosystem to develop a range of compatible add-on products that will work on multiple vendor’s 96Boards products over the lifetime of the platform. The LCG Steering Committee will manage the evolution of the 96Boards open specifications and the development of the 96Boards community. The 96Boards website provides software downloads and updates, information on compatible products, and forums for software developers, makers and OEMs to get community software support and downloads for all 96Boards products. + +“Reducing the cost of access to the latest Arm 64-bit hardware means more developers will be working on the 64-bit Arm ecosystem. 
This will result in accelerated innovation and new, high quality software solutions. With Actions Technology’s deep rooted knowledge in building highly effective systems at competitive prices, we expect Actions’ 64-bit 96Boards product will be widely utilized by developers working with Linaro” said Steve Taylor, Director of the 96Boards program. “We warmly welcome Actions as a member of Linaro and look forward to the open source community developing on their hardware platforms.” + +Actions Technology has joined the LCG to combine its low-cost design and manufacturing experience with Linaro's software expertise to produce the most cost-effective development platform for a broad range of software developers, from professional developers in large corporations to individual hobbyists and makers. This new platform is expected to be available in the second quarter of this year and is based around its new Falcon Series product family, the S900. This SoC is a new generation, ultra-high performance solution featuring a quad-core Arm® Cortex-A53 CPU and a high performance Rogue Series G6230 GPU from Imagination Technologies. + +“From mobile devices to the digital home, the electronics industry has been undergoing dramatic change, with a mass migration from single-core to dual-core to quad-core chipsets over the span of just a few years and the transition from 32-bit to 64-bit architecture for smart phones and tablets has accelerated dramatically in 2014,” said Dr. Zhenyu Zhou, President  of Actions Technology.  “The 64-bit revolution is underway and we are confident that our efforts in the Open Development Platform such as 96Boards will ensure all developers to have most cost-effective and best technology access to the latest 64-bit Arm architecture to accelerate device innovation for all types of connected products, from smartphones, Android tablets and OTT set-top boxes to wearables and a range of new IoT solutions.” + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 150 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [](). + +**About Actions Technology (Zhuhai)** + +Actions Technology focuses on solutions for smart handheld and smart home devices. Its main products are tablet, over-the-Top (“OTT”) set-top boxes and *Bluetooth*® wireless technology boombox solutions, along with total product and technology solutions based on the Android platform. These products feature high performance, low power multi-core CPUs and GPUs, supporting high resolution displays and wireless connectivity. Actions Technology currently has nearly 500 employees, 425 of whom are engineers. 
Among these engineers, more than half have over three years of research and development experience. Actions Technology is ranked among the first in terms of engineering talent, project experience and product quality within the IC design industry in China. Zhuhai Actions Technology is a wholly owned subsidiary of Actions Semiconductor Co. Ltd.. + +**About Actions Semiconductor Co., Ltd.** + +Actions Semiconductor is one of China's leading fabless semiconductor companies that provides comprehensive portable multimedia and mobile internet system-on-a-chip (SoC) solutions for portable consumer electronics. Actions Semiconductor products include SoCs, firmware, software, solution development kits, as well as detailed specifications of other required components. Actions Semiconductor also provides total product and technology solutions that allow customers to quickly introduce new portable consumer electronics to the mass market in a cost effective way. The Company is headquartered in Zhuhai, China, with offices in Shanghai, Shenzhen, Hong Kong and Taipei. For more information, please visit the Actions Semiconductor website at [http://www.actions-semi.com](http://www.actions-semi.com/). diff --git a/src/content/blogs/linaro-announces-alibaba-group-as-latest-member.mdx b/src/content/blogs/linaro-announces-alibaba-group-as-latest-member.mdx new file mode 100644 index 0000000..05a0fef --- /dev/null +++ b/src/content/blogs/linaro-announces-alibaba-group-as-latest-member.mdx @@ -0,0 +1,37 @@ +--- +author: linaro +date: 2015-04-02T10:57:14.000Z +description: Alibaba Group joins Linaro to accelerate development of software + platforms for Arm Powered servers +link: /news/linaro-announces-alibaba-group-as-latest-member/ +tags: + - arm + - linux-kernel +title: Linaro announces Alibaba Group as latest member +related: [] + +--- + +## Alibaba Group joins Linaro to accelerate development of software platforms for Arm Powered servers + +CAMBRIDGE,UK; 2 APRIL 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that [Alibaba](http://www.alibaba.com) has joined Linaro as a Group Member of the [Linaro Enterprise Group](https://wiki-archive.linaro.org/LEG) (LEG). + +The availability of a number of Armv8-A 64-bit silicon solutions and software platforms creates additional choice and avenues of innovation for data centers. These platforms aim to bring the energy-efficient processing of Arm-based processors, coupled with a variety of workload optimized technologies that will help data centers reduce their Total Cost of Ownership (TCO). Alibaba will collaborate with a range of companies within LEG to optimize the Armv8-A software platforms and maximize potential savings in a range of real deployment use cases. + +“Alibaba Group’s infrastructure carries the world’s largest e-commerce ecosystem, in addition to China’s leading cloud services,” said Shuanlin Liu, Chief Architect of Alibaba Infrastructure Service. “We need the best technical solutions as we step into the DT (Data Technology) era. Hence, we’re investing heavily in the innovation of a wide range of technologies, including the Arm architecture. We will continue to work closely with partners to accelerate the development and growth of the ecosystem.” + +Cloud-based services handle billions of revenue generating transactions today with significant growth predicted in the coming years. 
Large scale web and cloud properties rely on rapid and open innovation to achieve increased efficiency, scalability and intelligence throughout their infrastructure. The Arm ecosystem enables partners to innovate and develop workload optimized solutions, while safeguarding existing investments by leveraging a common instruction set architecture. Next-generation Arm-based cloud infrastructure will help businesses scale predictably; enabling access to important services whenever and wherever needed. + +In 2014, three members publicly demonstrated fully integrated Armv8-A 64-bit server silicon with initial production shipments underway. Target workloads include cloud hosting, web serving, caching, scale-out storage and analytics – all key components of a modern cloud software stack and areas where the Linaro Enterprise Group is actively engaged as an extended engineering team on behalf of its members. + +“As one of the world’s largest cloud operators, Alibaba is continually pushing technology boundaries to efficiently deploy new services at a massive scale,” said Lakshmi Mandyam, Director, Server Systems and Ecosystems, Arm. “Their collaboration with the Arm ecosystem will accelerate and expand open source software choices for companies wishing to deploy Armv8-A based servers. We welcome Alibaba’s participation in Linaro and the new dimension it will bring to an already vibrant community.” + +“Since its start in November 2012, the Linaro Enterprise Group and its members have been developing an open source software platform in preparation for the release of multiple Armv8-A 64-bit silicon solutions,” said Robert Booth, Linaro's Chief Operating Officer (COO). “Now is the right time for data center hosts and others to accelerate the launch of industry leading solutions by helping define actual workloads and by working with Linaro's engineering organization to develop and optimize the software. We are excited to welcome Alibaba as a Linaro member and look forward to working with them and benefiting from their breadth of sector experience to help fully realize the advantages of the Arm architecture in this market place.” + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies collaborate with Linaro's own engineering team to define the future of open source on Arm. The company's engineering organization comprises over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: its goal is to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. For more information about Linaro, visit [](/). 
diff --git a/src/content/blogs/linaro-announces-allwinner-technology-as-a-founding-member-of-the-new-linaro-digital-home-group.mdx b/src/content/blogs/linaro-announces-allwinner-technology-as-a-founding-member-of-the-new-linaro-digital-home-group.mdx new file mode 100644 index 0000000..b837640 --- /dev/null +++ b/src/content/blogs/linaro-announces-allwinner-technology-as-a-founding-member-of-the-new-linaro-digital-home-group.mdx @@ -0,0 +1,47 @@ +--- +title: Allwinner Technology joins the Linaro Digital Home Group +description: Allwinner Technology, a leading Chinese application processor + design company joins the Linaro Digital Home Group. Read more here. +image: linaro-website/images/blog/30921188158_953bca1c9f_k +author: linaro +date: 2014-03-03T12:05:03.000Z +link: /news/linaro-announces-allwinner-technology-as-a-founding-member-of-the-new-linaro-digital-home-group/ +tags: [] +related: [] + +--- + +## Allwinner Technology invests in open source community by joining Linaro + +MACAU, CHINA - 3 MAR 2014 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm® architecture, today at Linaro Connect Asia 2014 (LCA14) in Macau [announced\*](http://www.youtube.com/watch?v=L7gPPJSNJBM) that leading Chinese application processor design company Allwinner Technology has joined Linaro as a group member. + +Linaro has announced a total of 29 member companies working together to accelerate open source software development for the Arm architecture. + +Allwinner Technology is a founding member of a new market segment group being formed in Linaro to focus on the Digital Home market. This group will be the third Linaro segment group, following the formation of the Linaro Enterprise Group (LEG), focused on Arm servers, and the Linaro Networking Group (LNG) focused on the networking equipment market space. + +“China is a significant area of development for Linaro and open source software is gathering significant strategic momentum both in the local regional market and with global customers,” said Joe Bates, Linaro Member Services EVP. “We’re particularly happy to welcome Allwinner as a member of the new Linaro digital home group and look forward to working with them on open source software and on accelerating time to market for new products in this rapidly developing segment.” + +“We are pleased to join Linaro as a founding member of the new Linaro Digital Home Group,” said Jack Lee, Chief Marketing Officer of Allwinner. “We will take an active role in the organization and work with Linaro and the open source community to drive new Arm technologies.” + +*\*Video of the announcement is available on YouTube:* [*http://www.youtube.com/watch?v=L7gPPJSNJBM*](http://www.youtube.com/watch?v=L7gPPJSNJBM)\_. *See Allwinner announcement at 24 minutes 28 seconds.* + +About Linaro + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 150 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. 
The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top three company contributors to recent Linux kernels\*. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [](/). + +\*LWN lists Linaro as the number 2 company contributor to kernels 3.12 and 3.13 and #3 to kernels 3.10 and 3.11:  [http://lwn.net/Articles/579081/](http://lwn.net/Articles/579081/),[http://lwn.net/Articles/570483/](http://lwn.net/Articles/570483/),[http://lwn.net/Articles/563977/](http://lwn.net/Articles/563977/),[http://lwn.net/Articles/555968/](http://lwn.net/Articles/555968/). + +About Allwinner + +Allwinner Technology is a leading fabless design company dedicated to smart application processor SoCs and smart analog ICs. Its product line includes multi-core application processors for smart devices and smart power management ICs used by brands worldwide. + +With its focus on cutting edge UHD video processing, high performance multi-core CPU/GPU integration, and ultra-low power consumption, Allwinner Technology is a mainstream solution provider for the global tablet, internet TV, smart home device, automotive in-dash device, smart power management, and mobile connected device markets. Allwinner Technology is headquartered in Zhuhai, China. + +*www.allwinnertech.com* +Follow us on Twitter @Allwinnertech + +For more information on the company, access to software and tools, and information on the community and open engineering, visit [www.linaro.org](/) diff --git a/src/content/blogs/linaro-announces-arm-based-developer-cloud-2.mdx b/src/content/blogs/linaro-announces-arm-based-developer-cloud-2.mdx new file mode 100644 index 0000000..2324661 --- /dev/null +++ b/src/content/blogs/linaro-announces-arm-based-developer-cloud-2.mdx @@ -0,0 +1,54 @@ +--- +excerpt: "Linaro announces the rollout of an Armv8 based Developer Cloud today + at Linaro Connect in Bangkok. " +title: Linaro announces Arm Based Developer Cloud +description: Linaro announced the rollout of an Armv8 based Developer Cloud in + Bangkok providing developers access to a cloud-based Arm development + environment. +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +author: linaro +date: 2016-03-07T11:01:23.000Z +tags: + - linux-kernel + - open-source +link: /news/linaro-announces-arm-based-developer-cloud-2/ +related: [] + +--- + +Bangkok, Thailand; 7 March 2016 + +Linaro Ltd., the collaborative engineering organization developing open source software for the Arm® architecture, announced the rollout of an Armv8 based Developer Cloud at Linaro Connect in Bangkok 2016. In collaboration with its silicon, server and software members, Linaro was providing developers with access to a cloud-based native Arm development environment, which could be used to design, develop, port and test server, cloud and IoT applications without substantial upfront hardware investment. Linaro Developer Cloud is no longer an active project and was fully decommissioned in 2021. + +Linaro established two Developer Cloud facilities, one based in Cambridge, UK and the second based in Austin, Texas. Over time the Developer Cloud expanded through participating Linaro member and member partner data-centers providing cloud regions in China, North America and Europe. 
+ +“Linaro works with its members to provide reference open source software to accelerate the development of innovative applications taking advantage of Arm based platforms”, said George Grey, CEO of Linaro. “As the adoption of Arm based servers accelerates and IoT applications rapidly evolve, software developers need access to hardware and easy to use software reference platforms. The Linaro Developer Cloud is designed to broaden the availability of the latest hardware to developers globally, and to enable commercial and private cloud providers to utilize the implementation to accelerate deployment of their own offerings. Linaro will publish the end to end open source code for the implementation of the Developer Cloud” + +The Developer Cloud was the combination of Arm based silicon vendors’ server hardware platforms, emerging cloud technologies, and many Linaro member driven projects, including server class boot architecture, kernel and virtualization. These projects had been under development since the formation of the Linaro Enterprise Group (LEG) and Linaro had already been enabling key developers via remote access to bare metal Arm servers for the last year. + +The Developer Cloud was based on OpenStack, leveraging both Debian and CentOS, as the underlying cloud OS infrastructure. It used Arm based server platforms from Linaro members AMD, Cavium,  Huawei and Qualcomm Technologies, Inc., and expanded with demand, and as new server platforms came to market. These platforms included both single socket and dual socket configurations as well as 10/40Gb networking, scalable storage and integrated accelerators that Arm SOC partners were bringing to market. + +Access to the Developer Cloud was provided via the linaro.cloud web portal which is no longer active. Through the portal, developers were able to request cloud access and report bugs and performance issues. The portal also provided a developer forum to share development and porting knowledge, as well as best practices for Arm servers. + +**Partner Quotes** + +**Suresh Gopalakrishnan, Corporate Vice President, Enterprise Solutions Engineering, AMD** +“AMD has been working closely with Linaro and its members to make available a complete software stack for installations like the Linaro Developer Cloud for AMD’s Arm based server processors” said Suresh Gopalakrishnan, Corporate Vice President, Enterprise Solutions Engineering at AMD. “The Opteron™ A1100 processor is built with these workloads in mind. We are providing the necessary support so the Opteron™ A1100 is readily available in the Linaro Developer Cloud and in a range of commercially available systems.” + +**Jeff Underhill,** **Director of Server Programs, Arm** +“Linaro’s reference software platform already gives developers easy access to open source project contributions that advance highly efficient Arm technologies across a variety of markets,” said Jeff Underhill, Director of Server Programs at Arm. “The Linaro Developer Cloud builds on this with a rich new development environment supporting a range of applications but it’s particularly exciting to see the immediate value this will bring to the growing Arm-based server ecosystem.” + +**Larry Wikelius, Vice President Software Ecosystem and Solutions Group, Cavium** +“Data Center customers are continuing to drive demand for Armv8 server solutions and the requirements for optimized software and applications are growing dramatically," said Larry Wikelius, Vice President Software Ecosystem and Solutions Group at Cavium. 
“Cavium has partnered with OVH to deliver a Public Cloud solution based on ThunderX®, Cavium’s Armv8 Workload Optimized Processor. We are pleased to be part of the Linaro Developer Cloud, which we expect would accelerate development of optimized software on Armv8 servers and enhance services offered by the Cloud providers and enterprises.” + +**Elsie Wahlig, Principal Engineer, Data Center Group, Qualcomm Technologies, Inc.** +“Qualcomm Technologies is pleased to be supporting the Linaro Developer Cloud,” says Elsie Wahlig, Principal Engineer, Data Center Group, Qualcomm Technologies, Inc. “As the software ecosystem for Arm servers gains momentum, the Developer Cloud presents a new vehicle for friction-less access to standards-based hardware and software to enable accelerated development by our software partners and open source community participants.” + +**Jon Masters, chief Arm architect, Red Hat** +“Since the founding of the Linaro Enterprise Group in 2012, Red Hat has been at the forefront in developing key open standards for the Arm ecosystem, and our active participation through the Fedora and CentOS communities as well as the Red Hat Arm Partner Early Access Program has helped to extend the reach of cloud computing to encompass Arm architecture” said Jon Masters, chief Arm architect, Red Hat. “We are pleased to see the availability of Linaro Developer Cloud and look forward to additional innovation streams that it will enable.” + +#### About Linaro + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [](/) and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/linaro-announces-broadcom-new-member.mdx b/src/content/blogs/linaro-announces-broadcom-new-member.mdx new file mode 100644 index 0000000..4370ee0 --- /dev/null +++ b/src/content/blogs/linaro-announces-broadcom-new-member.mdx @@ -0,0 +1,33 @@ +--- +title: Linaro announces Broadcom as new member +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +author: linaro +date: 2013-05-02T11:25:47.000Z +link: /news/linaro-announces-broadcom-new-member/ +description: CAMBRIDGE, UK - 2 MAY 2013 +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 2 MAY 2013 + +Linaro, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced Broadcom Corporation has joined Linaro as a club member. + +Broadcom, a global innovation leader in semiconductor solutions for wired and wireless communications, will contribute engineering resources to collaborate directly with Linaro and will participate in the organization’s steering committees directing the activities of Linaro’s engineering team. 
+ +“We’ve been very impressed by the Broadcom engineers who have actively participated in our Linaro Connect events and in the establishment of the Linaro Networking Group (LNG),” said George Grey, CEO of Linaro. “Linaro is very pleased to welcome Broadcom as a member. We value Broadcom’s communications expertise and we look forward to delivering a high ROI, leveraging their input and contributions as part of the combined work of our engineering teams.” + +“We look forward to working with Linaro and the open source community to drive the Linux-on-Arm architecture,” said Ajith Mekkoth, Broadcom Vice President, Software Engineering, Mobile Platform Solutions.  “Together with Linaro, we are committed to accelerating innovation in the development of Linux-based devices and furthering the adoption of open-source software across the network.” + +Linaro has a unique business model where multiple companies jointly invest in a software engineering team that creates core open source software in a collaborative and transparent environment. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro becoming one of the largest company contributors to recent Linux kernels\*. Linaro’s contribution to improving Arm’s support in the open source Linux community has also been recognized by Linus Torvalds\*\*. + +**Sources**: *Some 3.8 development statistics* Jonathan Corbet, LWN, 13 February 2013: [https://lwn.net/Articles/537110/](https://lwn.net/Articles/537110/) (subscription required); *Statistics from the 3.7 development cycle* Jonathan Corbet, LWN, 28 November 2012:[http://lwn.net/Articles/527191/](http://lwn.net/Articles/527191/)(subscription required) and earlier LWN articles. + +**Sources**: *Linaro Connect (LCA13) Monday Keynote: Jon Corbet founder of LWN.net,* 4 March 2013:[http://www.youtube.com/watch?v=JAmPRljN\_Ww](http://www.youtube.com/watch?v=JAmPRljN_Ww) (from 13 minutes 15 seconds. ); *Torvalds touts Linux’s advances in power, Arm and cell phones* Paula Rooney, ZDNet, 30 August 2012 + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 150 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. 
diff --git a/src/content/blogs/linaro-announces-first-development-board-compliant-96boards-iot-edition-specification.mdx b/src/content/blogs/linaro-announces-first-development-board-compliant-96boards-iot-edition-specification.mdx new file mode 100644 index 0000000..4b0ca2e --- /dev/null +++ b/src/content/blogs/linaro-announces-first-development-board-compliant-96boards-iot-edition-specification.mdx @@ -0,0 +1,207 @@ +--- +title: Linaro Announces First Development Board Compliant with 96Boards IoT + Edition Specification +description: Cambridge, UK; 26 September 2016 +image: linaro-website/images/blog/iot_planet_under_2mb +tags: + - linaro-connect + - linux-kernel +author: linaro +date: 2016-09-26T18:12:46.000Z +link: /news/linaro-announces-first-development-board-compliant-96boards-iot-edition-specification/ +related: [] + +--- + +Cambridge, UK; 26 September 2016 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today at Linaro Connect Las Vegas 2016 announced availability of the 96Boards ‘Carbon’ board - the first development board compliant with the 96Boards IoT Edition (IE) specification. The board is the latest addition to the 96Boards family, the open specification defining a platform for the delivery of low-cost 32-bit and 64-bit Arm ecosystem developer boards. It is available to purchase from [SeeedStudio](https://www.96boards.org/product/carbon/). + +Designed by SeeedStudio and Linaro, the 96Boards Carbon is an IoT reference development board targeted at IoT and embedded developers, the hobby community, and the open-source community. The board features the STM32F401RE Cortex-M4 SoC with 512KB onboard flash and *Bluetooth*® wireless technology LE. The board will be launched with Zephyr pre-installed. This is a small, scalable, real-time OS for use on resource-constrained systems. + +The [IoT Edition (IE) specification](https://github.com/96boards/documentation/blob/master/Specifications/96Boards-IE-Specification.pdf) is a joint effort between 96Boards, the Linaro IoT and Embedded (LITE) Group and its members. This specification is intended to foster the delivery of IoT devices using Arm Cortex-A and Cortex-R/M processors targeted at software developers, the maker community, higher education, and embedded OEMs. To comment on the specifications, please visit the [Specification forum](https://discuss.96boards.org/c/specification/6). If you wish to be involved in defining future versions of the specifications please contact 96Boards@Linaro.org for information about joining the Linaro 96Boards Group. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Component | Description |
+| --- | --- |
+| SoC | STMicroelectronics STM32F401RE |
+| CPU | Cortex-M4 |
+| Clock speed | 84MHz maximum |
+| SRAM | 96KB |
+| Storage | 512KB onboard Flash |
+| Wireless | nRF51822 Bluetooth* |
+| USB | 2 x Micro USB |
+| Expansion Interface | 30 pin low speed expansion connector: +3.3V, +5V, VCC, GND, UART, I2C, SPI, GPIO x4 |
+| Digital Out Voltage | 3.3V |
+| Analog In Voltage | 0-3.3V |
+| Analog Pins | 6 |
+| LED | 1 x UART Tx, 1 x UART Rx, 1 x Power, 1 x Bluetooth, 2 x User LEDs |
+| Button | RST (Reset the STM32F4), BOOT0 (Boot into bootloader for flashing) |
+| Power Source | micro-USB |
+| OS Support | Zephyr |
+| Size | 60x30mm |
+ +\*Note: Flashing the nRF51 chip requires a SWD programmer such as an ST-Linkv2 or Segger JLink + +For more information, visit [https://www.96boards.org/product/carbon/](https://www.96boards.org/product/carbon/) + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit[ ](/) and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/linaro-announces-first-development-board-compliant-96boards-tv-platform-specification.mdx b/src/content/blogs/linaro-announces-first-development-board-compliant-96boards-tv-platform-specification.mdx new file mode 100644 index 0000000..3f03c60 --- /dev/null +++ b/src/content/blogs/linaro-announces-first-development-board-compliant-96boards-tv-platform-specification.mdx @@ -0,0 +1,40 @@ +--- +title: Linaro Announces First Development Board Compliant with 96Boards TV + Platform Specification +description: "Cambridge, UK: 30 August 2016" +image: linaro-website/images/blog/40965990761_090a30658a_k +tags: + - arm + - linux-kernel + - open-source +author: linaro +date: 2016-08-30T11:41:20.000Z +link: /news/linaro-announces-first-development-board-compliant-96boards-tv-platform-specification/ +related: [] + +--- + +Cambridge, UK: 30 August 2016 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced support for the HiSilicon ‘Poplar’ board - the first development board compliant with the 96Boards Enterprise Edition TV Platform specification. The board is the latest addition to the 96Boards family, the open specification defining a platform for the delivery of low-cost 32-bit and 64-bit Arm ecosystem developer boards. It is available to purchase for under $100 from [Tocoding](http://en.tocoding.com/index.php/96boards-poplar/)[ Technologies](http://en.tocoding.com/index.php/96boards-poplar/). + +Developed by HiSilicon, Poplar is a TV reference development board targeted at set-top box (STB) developers, the hobby community, and the open-source community. The board features the Hi3798C V200 with an integrated quad-core 64-bit Arm Cortex A53 processor and high performance Mali T720 GPU, making it capable of running any commercial set-top solution based on Linux or Android. Its high performance specification also supports a premium user experience with up to H.265 HEVC decoding of 4K video at 60 frames per second. The board will be launched with Android 5.1.1 pre-installed for an instant boot out of the box. + +The TV Platform specification is a joint effort between 96Boards, the Linaro Digital Home Group (LHG) and its members. 
The specification is designed to deliver a low-cost, high performance board for software developers working on advanced media frameworks and secure digital media delivery and playback solutions. The Poplar board delivers the latest 64-bit Armv8 platform with advanced codecs and graphics support, combined with a full complement of audio and video interfaces. Remote control, tuner card, and SmartCard module add-ons are also available. The optional tuner card enables traditional linear services delivered via terrestrial, cable and satellite to be simultaneously processed with media content received via broadband sources (OTT, IPTV). + +“Providing the Poplar board is a key part of our strategy to enable all players in this ecosystem to quickly and easily develop and prototype new state of the art digital home solutions with our SoCs,” said Ji Wang, Vice President of Engineering at HiSilicon. “Our partnership with Linaro and 96Boards not only ensures that independent developers, our partners and customers have access to our SoC in a convenient form factor with a choice of interfaces, but will also ensure that they have the necessary software building blocks to accelerate their own development.” + +The Poplar board will serve as a common platform for LHG members to continue creating optimized, high-performance secure media solutions for Arm on both Linux- and Android-based platforms. Licensees of the RDK (Linux) will be able to create Open Embedded/Yocto RDK builds for Poplar. The Poplar board will also serve as a common development platform for Android TV (AOSP) as well as for TVOS-based STB solutions used in China. + +“The Poplar board is an exciting example of what can be achieved through collaboration,” said Mark Gregotski, Director of the Linaro Digital Home Group (LHG). “Software engineers now have access to a rich and powerful platform to develop innovative solutions for the digital home. Linaro’s Linux kernel support and reference software builds will enable developers to focus on differentiating their solutions while building on a high-performance and reliable foundation.” + +Developers using the Poplar board can experiment with “secure world” operating systems, such as OP-TEE, running on Arm TrustZone™, with reference platform builds provided by Linaro. The Poplar board has security processing capabilities that allow developers to integrate commercial DRMs and downloadable conditional access (DCAS) solutions. Poplar supports HDCP 2.2 copy protection to protect 4K Ultra HD content. + +![Chart 4 Image](/linaro-website/images/blog/chart-4) + +For more information, visit [http://www.96boards.org/product/poplar/](https://www.96boards.org/product/poplar/) + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. 
+ +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit[ ](/) and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/linaro-announces-first-lts-monarch-release-opendataplane.mdx b/src/content/blogs/linaro-announces-first-lts-monarch-release-opendataplane.mdx new file mode 100644 index 0000000..01503b5 --- /dev/null +++ b/src/content/blogs/linaro-announces-first-lts-monarch-release-opendataplane.mdx @@ -0,0 +1,42 @@ +--- +title: Linaro Announces First LTS Monarch Release of OpenDataPlane +description: "Cambridge, UK: 18 August 2016" +image: linaro-website/images/blog/electricity-1288717_1920 +author: linaro +date: 2016-08-18T13:34:13.000Z +link: /news/linaro-announces-first-lts-monarch-release-opendataplane/ +tags: [] +related: [] + +--- + +Cambridge, UK: 18 August 2016 + +Linaro Ltd, the collaborative engineering organization developing open-source software for its member companies, today announced the availability of the first Long Term Support (LTS) Monarch release of OpenDataPlane®. The OpenDataPlane project is an open-source, cross-platform set of application programming interfaces (APIs) for the networking Software Defined Data Plane.The code released is production ready and has been demonstrated by Linaro Networking group (LNG) members and their partners. + +“This new OpenDataPlane Monarch LTS release provides real-world applications a stable and optimized set of APIs that run on accelerated Arm® Cortex®-based SoCs and SmartNICs. They can even run on commodity x86 hardware due to the high level of abstraction offered that can span the different architectures,” said François Frédéric Ozog, LNG Director at Linaro. “We’re looking forward to the release of commercial implementations later this month and expect to demonstrate a new L3FWD application and others in September at the next Linaro Connect in Las Vegas.” + +This Monarch LTS release of OpenDataPlane (ODP) will enable other projects to leverage the acceleration provided by the ODP APIs knowing that the code base will be fully supported for the foreseeable future. Work has already begun on network protocol stacks such as OpenFastPath (OFP), products like the NGiNX web server accelerated with ODP and OFP and libraries like OpenSSL that provide crypto acceleration via ODP. In addition, ODP and ODP-based products, such as OFP, NGiNX and OpenSSL, can now be made available as packages in widely known Linux distributions such as CentOS, Debian and OpenEmbedded. + +Reference implementations of OpenDataPlane provided by LNG as well as production grade implementations provided by various silicon partners will be available with this release. To accompany the release, Linaro has also launched a validation test suite that permits users and vendors to verify API compatibility between different ODP implementations. The specification, LNG reference implementations, and validation test suite are available today at [OpenDataPlane.org](http://opendataplane.org/). Performance implementations supporting hardware platforms offered by Cavium, Inc. and NXP will be available later this month. + +OpenDataPlane is developed jointly by LNG members and the wider open-source community to represent the interests of application developers, silicon vendors and software solution providers. 
+ +“To meet the challenges associated with next-generation 5G networks, delivering an energy-efficient, high-performance and low-latency data plane which delivers application portability and vendor interoperability is critical,” said Phil Bourekas, director of network segment marketing, Arm. “The LTS Monarch release of ODP allows application developers to adopt it with confidence, and is a milestone in enabling the Arm ecosystem to offer differentiated hardware innovations with stable and consistent APIs across the entire network infrastructure.”
+ +“By achieving this milestone release of OpenDataPlane (ODP), Linaro and all of the Linaro Network Group members are actively demonstrating the real benefits of our collaborative engineering efforts,” said Larry Wikelius, Vice President Software Ecosystem and Solutions Group at Cavium, Inc. “ODP is an outstanding proof point for the value of standard interfaces that allow Armv8 SoC vendors to showcase differentiating performance and features while still supporting leading software applications. Cavium, Inc. is proud to continue its tradition of open source community leadership and intends to deliver Monarch across the ThunderX®, OCTEON TX™ and OCTEON® III product families.”
+ +“The Monarch release represents a very significant milestone for the ODP project at large, and for Enea as a member and contributor,” said Daniel Forsgren, SVP Product Management at Enea. “We are excited to see that the ODP project has delivered on our expectations regarding performance and scalability, and we now have a production-ready ODP version that we will use as a platform for our work.”
+ +“ODP Monarch is a significantly improved API and Linux generic implementation that provides easier integration for network function apps,” said Aiguo Cui, Chief Architect, Huawei Technologies Co., Ltd. “We believe this release will be an important milestone in ODP history, and we will make our chipset support ODP Monarch and recommend it to our partners.”
+ +“OpenDataPlane is an important part of our networking vision as it brings together all networking Systems on Chips (SoCs), enabling portability, high capacity and power efficient implementation,” said Jarmo Hillo, Head of Processor Technology at Nokia Bell Labs. “This new Monarch release realizes ODP’s original commercial promise and we look forward to continuing our collaboration and integration with OpenFastPath to meet the ever evolving needs of the networking market.”
+ +“ODP’s new OpenDataPlane® LTS Monarch release supports rapid migration to a platform with accelerators, thereby enabling the throughput and efficiency required by today’s enterprise edge networking markets,” said Sam Fuller, Head of Strategy System Solutions, Digital Networking at NXP Semiconductors. “NXP is pleased to announce support for the Monarch ODP API in a release that is available now on our multicore QorIQ processors based on Arm Cortex technology.”
+ +**About LNG**
+The Linaro Networking Group (LNG) was founded in February 2013 and now consists of fourteen member companies including Arm, Broadcom, Cavium, Inc., Cisco, ENEA, Ericsson, Freescale, HiSilicon, MontaVista, Nokia, Texas Instruments, Wind, ZTE and Linaro.
The OpenDataPlane project was established from the start of LNG to produce an open-source, cross-platform application programming interface (API) for the networking data plane, that offers both portability and automatic access to vendor-optimized platform acceleration capabilities, as well as linear scalability for applications deployed in many-core system environments. To find out more, please visit [http://www.opendataplane.org/](http://www.opendataplane.org/) + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/linaro-announces-fujitsus-collaboration-accelerate-high-performance-computing-arm.mdx b/src/content/blogs/linaro-announces-fujitsus-collaboration-accelerate-high-performance-computing-arm.mdx new file mode 100644 index 0000000..738beb8 --- /dev/null +++ b/src/content/blogs/linaro-announces-fujitsus-collaboration-accelerate-high-performance-computing-arm.mdx @@ -0,0 +1,49 @@ +--- +title: Linaro Announces Fujitsu's Collaboration to Accelerate High Performance + Computing on Arm +description: Linaro Ltd, the open source collaborative engineering organization + developing software for the Arm® ecosystem, today announced that Fujitsu + Limited1 has joined Linaro as a member of the Linaro Enterprise Group (LEG) + and a founding member of the LEG High Performance Computing Special Interest + Group (HPC SIG). +image: linaro-website/images/blog/CCS_banner_image +tags: + - arm + - linaro-connect + - linux-kernel +author: linaro +date: 2017-03-06T08:01:29.000Z +link: /news/linaro-announces-fujitsus-collaboration-accelerate-high-performance-computing-arm/ +related: [] + +--- + +Budapest, Hungary; 6 March 2017 + +Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, today announced that Fujitsu Limited1 has joined Linaro as a member of the Linaro Enterprise Group (LEG) and a founding member of the LEG High Performance Computing Special Interest Group (HPC SIG). + +“We’re very pleased to welcome Fujitsu to the LEG and the HPC SIG, and we look forward to working with them and the other industry leading HPC SIG members to enable the most effective deployment and management of Arm-based HPC solutions,” said George Grey, Linaro CEO. 
“Fujitsu’s record in the supercomputer market speaks clearly to their experience, and the public commitment they made last year to Arm-based HPC solutions for the Post-K computer project in Japan sets an impressive vision that we look forward to helping them achieve.” + +Through the LEG HPC SIG, Fujitsu is ready to cooperate with other members to help accelerate the development of the Arm HPC ecosystem. Discussions at Linaro Connect in Budapest are expected to focus on enabling the OpenHPC community effort to build the Arm HPC software stack and improve Arm cluster competitiveness, SVE2 support for the QEMU Tiny Code Generator (TCG) instruction simulator, HPC compiler optimization, the Linaro Developer Cloud and future efforts to establish a development environment for Independent Software Vendor (ISV) developers to expand the software portfolio for Arm-based clusters. + +“Developing a supercomputer that is many times faster than any of those currently available is clearly a challenging process and involves leveraging Fujitsu's top hardware and software talent, as well as the help of partner companies such as Arm,” said Naoki Shinjo, SVP, Head of Next Generation Technical Computing Unit, Fujitsu. “The engineering resources of Linaro and its group's members provide a unique way for us and our partners to work together on shared challenges and accelerate the deployment of new systems for a variety of HPC uses, including improved scientific simulations for climate change models, disaster prediction, drug discovery and the development of new fuels.” + +Fujitsu has consistently adopted the most appropriate instruction set architecture (ISA) during its 40 years’ experience in supercomputers. The company has chosen to adopt the Armv8-A architecture with SVE in order to position the Post-K computer, currently under joint development by RIKEN and Fujitsu, so it can benefit from a broader software ecosystem than the K computer, an earlier version which was ranked as the world's number one supercomputer in 2011, and remains ranked seventh today. The Post-K computer will be built around a new many-core HPC processor with 512-bit wide SIMD, high scalability and very efficient performance per watt. This processor is the result of the close technology partnership between Fujitsu and Arm. + +“Fujitsu joining Linaro and the formation of the LEG HPC SIG are key milestones in the advancement of the Arm HPC ecosystem,” said Noel Hurley, vice president and general manager, Business Segments Group, Arm. “This will immediately benefit the fast-moving HPC community, many of whom are already seeing value in the massively-parallel Armv8-A architecture. We look forward to working together to solve the challenges in this market, so that the deployments of Arm-based HPC systems will bring the industry a step closer to efficient Exascale computing.” + +LEG was established in November 2012 as the first vertical segment group within Linaro. The group was established to accelerate Arm server ecosystem development and it extended the list of Linaro members beyond Arm silicon vendors to Server OEM’s and commercial Linux providers. Toward the end of 2016, six of the LEG members decided to extend the LEG work to include HPC open-source software development for the Arm enterprise ecosystem within a Special Interest Group. + +Linaro now has over 35 member companies working together to accelerate open source software development. 
As the range and capabilities of SoCs have grown exponentially, the benefits to be gained from collaboration on common open source software across the industry increase. Linaro’s goals are to enable more rapid innovation in the industry through using shared resources to engineer common software elements, enabling each member to focus more of their own resources on product differentiation. + +**Notes:** + +1: Fujitsu is the leading Japanese information and communication technology (ICT) company, offering a full range of technology products, solutions, and services. Approximately 156,000 Fujitsu people support customers in more than 100 countries. We use our experience and the power of ICT to shape the future of society with our customers. Fujitsu Limited (TSE: 6702) reported consolidated revenues of 4.7 trillion yen (US$41 billion) for the fiscal year ended March 31, 2016. For more information, please see [http://www.fujitsu.com](http://www.fujitsu.com). + +2: The Scalable Vector Extension (SVE) has been developed by Arm specifically for vectorization of HPC scientific workloads as an extension to the Armv8-A architecture. It complements the NEON 128-bit SIMD (Single Instruction, Multiple Data) instruction set and, while the longer SVE vectors benefit HPC, the extension also offers an opportunity to benefit other systems over the longer term as they scale to support increased data level parallelism. + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx b/src/content/blogs/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx new file mode 100644 index 0000000..49145df --- /dev/null +++ b/src/content/blogs/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx @@ -0,0 +1,64 @@ +--- +author: shovan-sargunam +date: 2015-01-22T16:50:47.000Z +description: "CAMBRIDGE, UK – 22 JANUARY 2015" +link: /news/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong/ +tags: + - linaro-connect +title: " Linaro Announces Keynote Speakers and Demos For Upcoming Linaro Connect + Hong Kong" +related: [] + +--- + +CAMBRIDGE, UK – 22 JANUARY 2015 + +Linaro, the collaborative engineering organization developing open source software for the Arm architecture, today announced the keynote speakers for the upcoming [Linaro Connect ](https://resources.linaro.org/en/tags/37aaba93-c015-4750-b4f1-a60bd5afd13d)Hong Kong 2015 that will take place February 9th – 13th in Hong Kong, China.  
The week will kick-off with Linaro’s Chief Executive Officer, George Grey, welcoming attendees to Linaro Connect and giving an update on the latest Linaro developments.  Each day will then begin with a keynote from various industry leaders and the week will end with a rich assortment of demonstrations, including much of Linaro’s latest Armv8-A 64-bit software developments. + +**Industry speakers for** [**Linaro Connect Hong Kong 2015 keynotes**](https://resources.linaro.org/en/tags/37aaba93-c015-4750-b4f1-a60bd5afd13d) **include:** + +* Jon Masters - Chief Arm Architect, Redhat + +* Dejan Milojicic - Senior Researcher & Manager, HP Labs + +* Bob Monkman - Enterprise Segment Marketing Manager, Arm + +* Greg Kroah-Hartman – Linux Foundation Fellow + +* Warren Rehman -  Android Partner Engineering Manager, Google + +The Linaro Connect agenda also features sessions covering: Android, Armv8-A, Automation & Validation, Digital Home, Enterprise Servers, LAVA, Linux Kernel, Networking, Power Management, Security, Toolchain, Virtualization and multiple training sessions. Additionally, [Linaro Connect Hong Kong 2015 will feature the following demos: ](https://resources.linaro.org/en/tags/37aaba93-c015-4750-b4f1-a60bd5afd13d) + +* Linaro Clear Key CDM + +* Chromium on Wayland with Gstreamer + +* Linaro Web Browser Test Framework + +* Demo of VLANd + +* l2fwd + +* OVS - x86 - Arm + +* ODP on Cavium platform + +* OpenJDK running on all Armv8 hardware + +* OpenStack running on Armv8 hardware + +* Android support for clang 3.6 and gcc 5.0 + +* Ceph on remote server cluster + +* UEFI on BeagleBone Black + +In addition, the engineering teams will be working throughout the week on the available hardware and this will spawn additional demos, including the results of the LAVA team Week of Hacking. The demos will feature in some of the keynotes and at other times during the week, but participants will get to see most of them all together at Demo Friday. The Linaro Networking Group (LNG) is also planning a special demo session at the end of its focused sessions on Thursday. + +Linaro Connect is attended by the best and brightest in the Linux on Arm industry. The event is held twice each year in different regions around the world and they bring together a unique combination of engineers, program managers, technical architects, project managers, users and industry experts. The events give participants a place to learn about emerging trends, current community and Linaro activities as well as new ways of optimizing the latest Arm technology. Linaro Connect is *the* place to help solve community problems.  Engineers returning to their companies after Linaro Connect take back the latest software developments around Arm and new ways of optimizing the latest Arm technology. + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of open source on Arm. The company is a not-for-profit engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. 
The full scope of Linaro’s engineering work is open to all online. For more information about Linaro, visit . diff --git a/src/content/blogs/linaro-announces-keynote-speakers-linaro-connect-usa-2013-event-santa-clara.mdx b/src/content/blogs/linaro-announces-keynote-speakers-linaro-connect-usa-2013-event-santa-clara.mdx new file mode 100644 index 0000000..92d8329 --- /dev/null +++ b/src/content/blogs/linaro-announces-keynote-speakers-linaro-connect-usa-2013-event-santa-clara.mdx @@ -0,0 +1,32 @@ +--- +author: linaro +date: 2013-10-15T11:29:28.000Z +description: CAMBRIDGE, UK - 15 OCT 2013 +link: /news/linaro-announces-keynote-speakers-linaro-connect-usa-2013-event-santa-clara/ +title: Linaro Announces Keynote Speakers for Linaro Connect USA 2013 Event in + Santa Clara +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 15 OCT 2013 + +Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, today announced the keynote speakers for the upcoming Linaro Connect USA 2013 (LCU13) that will take place October 28th – November 1st in Santa Clara, California.  The conference will feature several keynote speakers that will speak on a range of topics from High End Telecom Networking to Transformative Technology in the Internet of Things (IoT), data center and mobile, new developments in Linux and the Open Compute Project.  Linaro’s Chief Executive Officer, George Grey, will welcome attendees to LCU13 and speak about the future for open source software across a full range of segments, from servers to the Internet of Things (IoT). + +Keynote speakers scheduled for the Linaro Connect USA 2013 include: + +* George Grey - Chief Executive Officer, Linaro +* Jim Zemlin – Executive Director, Linux Foundation +* Frank Frankovsky – Vice President of Infrastructure, Facebook +* Jarmo Hillo – Head of Processor Technology, Nokia Solutions and Networks + +In addition to the above keynotes at LCU13, attendees are invited to join the Arm TechCon keynote by Arm CEO Simon Segars. The Linaro Connect week-long agenda also features sessions on Android, Graphics and Multimedia, Kernel Consolidation, Platform Development, Power Management, QA and Infrastructure, Tools, Validation and LAVA, and training.  Along with the regular track sessions, there will be an additional networking mini summit on Tuesday October 29th from 9.00am - 1.00pm. + +Held at the Santa Clara Convention Center, LCU13 is the place to learn about the future of Linux on Arm.  Engineers returning to their companies after Linaro Connect take back the latest software developments around Arm and new ways of optimizing the latest Arm technology.  This Linaro Connect is co-located with [Arm TechCon](http://www.armtechcon.com/), which runs from Tuesday October 29th to Thursday October 31st. Separate registration is required to attend the sessions of both events, but co-location gives attendees of Linaro Connect a unique chance to see most of Arm’s hardware and software ecosystem in one place at the Expo. + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 140 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. 
Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. diff --git a/src/content/blogs/linaro-announces-keynote-speakers-linaro-connect-usa-2014.mdx b/src/content/blogs/linaro-announces-keynote-speakers-linaro-connect-usa-2014.mdx new file mode 100644 index 0000000..aecc23f --- /dev/null +++ b/src/content/blogs/linaro-announces-keynote-speakers-linaro-connect-usa-2014.mdx @@ -0,0 +1,35 @@ +--- +author: jennifer-castelino +date: 2014-08-20T14:21:50.000Z +description: "CAMBRIDGE, UK; 20 AUGUST 2014:" +link: /news/linaro-announces-keynote-speakers-linaro-connect-usa-2014/ +tags: + - linaro-connect +title: Linaro announces Keynote Speakers for Linaro Connect USA 2014 +related: [] + +--- + +CAMBRIDGE, UK; 20 AUGUST 2014: + +Linaro Ltd., the collaborative engineering organization developing open source software for the Arm architecture, announced today the keynote speakers for the Linaro Connect USA event to be held September 15-19 at the Hyatt Regency in Burlingame, California. Linaro Connect is an international conference that provides an open forum for the Linux on Arm community to discuss the latest software developments around the Arm architecture. + +Linaro Connect is attended by the best and brightest in the Linux on Arm industry.  Linaro Connects are held twice each year in different regions around the world and they bring together a unique combination of engineers, program managers, technical architects, project managers, users and industry experts. It gives participants a place to learn about emerging trends, current community and Linaro activities as well as new ways of optimizing the latest Arm technology.  It is *the* place to help solve community problems. + +Additionally, Linaro Connect USA 2014 will feature demos of Linaro members’ products.  The demos will take place during the week and give participants a chance to see the work of Linaro and its members in action. + +**Confirmed keynote speakers and topics include:** + +* Paul Eremenko, Director of Project Ara at Google, will discuss  “What if hardware was more like software? Google’s Project Ara and the democratization of the hardware ecosystem.” + +* Dr. Ken Morse, CTO, Connected Devices and SP Video Infrastructure at Cisco, will present “Enabling Internet Speed for Service Providers” + +* Linda Knippers, Distinguished Technologist at HP, will give a keynote on “Fueling HP Moonshot” + +* Christian Reis, VP Hyperscale at Canonical, will present “Mythology and Potential of the Arm Server” + +* George Grey, CEO of Linaro, will give an overview of  "Linaro Past, Present and Future" + +* Dr. Christos Kolias, Senior Research Scientist at Orange Silicon Valley  will present  “NFV:  Empowering the Network” + +**About Linaro:** Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a collaborative engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. 
Linaro is distribution neutral, its goal is to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The majority of Linaro’s engineering work is open to all online. To find out more, please visit:  [](/). diff --git a/src/content/blogs/linaro-announces-latest-96boards-product-aosp-development.mdx b/src/content/blogs/linaro-announces-latest-96boards-product-aosp-development.mdx new file mode 100644 index 0000000..836be95 --- /dev/null +++ b/src/content/blogs/linaro-announces-latest-96boards-product-aosp-development.mdx @@ -0,0 +1,202 @@ +--- +author: linaro +featured_on_home: true +published: true +date: 2017-04-26T11:16:13.000Z +home_cat: boards +title: Latest 96Boards Product for AOSP Development +link: /news/linaro-announces-latest-96boards-product-aosp-development/ +image: linaro-website/images/blog/Client_Devices_banner_pic +categories: + - news +description: Linaro today announced the availability of the HiKey 960 96Boards + development platform from Linaro Core member Huawei. Read more here. +tags: [] +related: [] + +--- + +[96Boards OpenHours](https://www.96boards.org/) 25 April 2017 6.00pm PST, 26 April 2017 9.00am CST] Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, today announced the availability of the [HiKey 960 96Boards](https://www.96boards.org/product/hikey960/) development platform from Linaro Core member Huawei. Designed to provide access to the latest Arm mobile technology for AOSP developers, this new board is now listed on the 96Boards website and is available through global distribution channels. + +While all AOSP developers will find the board useful, it is developers who want to work closer to the hardware who will gain maximum advantage from it. This will include mobile developers looking to enable support and innovative functionality with new and existing sensors, security and other peripheral hardware and software, and developers working on derivative products for markets like digital signage, point of sale (POS), robotics and others outside the traditional mobile AOSP space. + +> “We are very pleased to be working with Linaro members Arm, Huawei, Google, Archermind and LeMaker on this product”, said George Grey, Linaro CEO. “The HiKey 960 delivers on the goal of 96Boards to provide access to the latest Arm technology to the developer community, with support for the latest Huawei mobile SoC featuring high performance Arm Cortex®-A73 cores coupled with the latest generation of Arm Mali™ GPU technology.” + +The new board is based around the Huawei Kirin 960 octa-core Arm big.LITTLE™ processor with four Arm Cortex-A73 and four Cortex-A53 cores with 3GB of LPDDR4 SDRAM memory, 32GB of UFS 2.0 flash storage, and the latest generation Mali-G71 MP8 graphics processor. Available now through Archermind ([Alpha-Star](https://www.alpha-star.org/hikey960)) and LeMaker (http://www.lenovator.com/product/80.html), the HiKey 960 96Boards development platform is on sale at US$239 and is expected to be available through local distribution in the US, EU and Japan in early May. + +> “We are very pleased to bring our highest performance mobile SoC to the 96Boards program and to be working with Google and Linaro in the AOSP project,” said Benjamin Wang, Deputy General Manager of Huawei Wireless Terminal Chipset Business Unit,. 
“We expect that developers will be excited to get access to the latest Arm CPU and GPU technology, as well as new features such as a PCIe M.2 card interface for additional high performance storage or wireless cards, all running using the latest AOSP builds.” + +Google develops Android to run on multiple architectures and the Linaro engagement is in particular aimed at collaboration with Arm-based system-on-chip (SoC) partners. Android Open Source Project (AOSP) has been a key part of Linaro’s work since its founding in 2010, and Linaro’s AOSP contributions have now spanned 24 kernel versions from Linux kernel 2.6.36 in Android Honeycomb (3.0) to Linux 4.10 today. The HiKey 960 will be the second HiKey officially supported as an Android reference board and it will bring new levels of performance to Android developers. + +> “Arm is committed to providing platform developers with access to new technologies in an effort to support ongoing innovation on Arm-based mobile platforms,” said Laurence Bryant, vice president of personal mobile compute, Business Segments Group, Arm. “The HiKey 960 platform integrates the latest big.LITTLE technology that combines the Cortex-A73, the most powerful yet power efficient mobile CPU, the Cortex-A53 for further efficiency and the latest Mali GPU, Mali-G71. Running on the latest AOSP builds, we’re enabling a range of advanced solutions to be brought to market more quickly.” + +Initial software support for the board is provided in the AOSP source tree based on the Android Common Kernel using the Linux 4.4 kernel release. Linaro and Huawei are also working on the Linux 4.9 based Android Common kernel and maintaining support for the Kirin 960 SoC in the mainline kernel.org tree, allowing for the availability of multiple Linux distributions for this board in the future. + +Information about the HiKey 960 board and Running Android on it will be available here: [http://source.android.com/source/devices.html](http://source.android.com/source/devices.html). Linaro is providing instructions for developers here: [http://linaro.co/hikey960-start](https://www.96boards.org/documentation/consumer/hikey/hikey960/getting-started/index.html). + +**HiKey 960 board specifications** + + + + +\\\\\\*\\\\\\*Component\\\\\\*\\\\\\* +\\\\\\*\\\\\\*Description\\\\\\*\\\\\\* + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Component | Description |
+| --- | --- |
+| SoC | Kirin 960 octa-core CPU: 4x Cortex-A73 cores to 2.4 GHz, 4x Cortex-A53 cores to 1.8 GHz; Mali-G71 MP8 GPU |
+| Software | AOSP with 4.4 AOSP common kernel |
+| Storage | 32GB UFS 2.0 flash storage and microSD card slot |
+| Video Output / Display Interface | HDMI 1.2a up to 1080p plus 4-lane MIPI DSI |
+| Connectivity | Dual-band 802.11 b/g/n/ac WiFi and *Bluetooth*® wireless technology 4.1 with on board antennas |
+| USB | 2 x USB 3.0 type A host ports; 1 x USB 2.0 type C OTG port |
+| Camera | 1x 4-lane MIPI CSI; 1x 2-lane MIPI CSI |
+| Expansion | PCIe Gen2 on M.2 M Key connector; 40 pin low speed expansion connector: +1.8V, +5V, DC power, GND, 2x UART, 2x I2C, SPI, I2S, 12x GPIO; 60 pin high speed expansion connector: 4L MIPI DSI, 2L+4L MIPI CSI, 2x I2C, SPI (48M), USB 2.0 |
+| Misc | LEDs for WiFi & Bluetooth, 4x user LEDs, power button |
+| Power Supply | 8V-18V/2A via 4.75/1.7mm power barrel (EIAJ-3 compliant), 12V/2A power supply recommended |
+| Dimensions | 85mm x 55mm |
+ +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [http://www.96Boards.org](https://www.96boards.org/). + +*** + + + +Buy Now + +* [Seeed](https://www.seeedstudio.com/hikey-960-development-board-4gb-ram-version-p-3028.html) (Worldwide) – $239.00 +* [Alpha Star](https://en.alpha-star.org/hikey960) +* [Switch Science](http://linaro.co/hikey960-switch) (Japan) + +[Documentation](https://www.96boards.org/documentation/consumer/hikey/hikey960/index.html) & [Support Forum](https://discuss.96boards.org/c/products/hikey960) diff --git a/src/content/blogs/linaro-announces-launch-of-machine-intelligence-initiative.mdx b/src/content/blogs/linaro-announces-launch-of-machine-intelligence-initiative.mdx new file mode 100644 index 0000000..a723f2e --- /dev/null +++ b/src/content/blogs/linaro-announces-launch-of-machine-intelligence-initiative.mdx @@ -0,0 +1,37 @@ +--- +title: Linaro Launches Machine Intelligence Initiative +description: "The Machine Intelligence Initiative aims to reduce redundant + engineering in the deep learning and neural network acceleration ecosystem. + Read more here. " +date: 2018-09-17T09:00:00.000Z +tags: + - linaro-connect + - arm +author: linaro +related: [] + +--- + +Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, announced today that it is launching its Machine Intelligence Initiative as a focal point for collaborative engineering in this space. Arm is supporting this new initiative with engineering resources and by opening up Arm’s Neural Network (NN) inference engine to external contributions. Arm, Linaro, and the other members of the Machine Intelligence Initiative will collaborate to reduce redundant engineering and fragmentation in the deep learning and neural network acceleration ecosystem and accelerate development of new technology solutions. + +“The development of a common software interface supporting industry-leading frameworks and tools is one of the biggest requirements in accelerating adoption of machine learning by developers,” said Robert Elliott, director of applied machine learning, Arm. “Arm is addressing this with our donation of the Arm NN inference engine to the Machine Intelligence Initiative, which will quickly enable the Linaro community and Arm ecosystem to deploy machine learning across the widest number of applications.” + +Neural network acceleration in Arm®-based platforms provides an unprecedented opportunity for new intelligent devices. Today however, every IP vendor forks existing models and frameworks to integrate their hardware blocks and then tunes for performance. 
This leads to duplication of effort, an increasing perpetual cost of re-integration for every new rebasing, and an overall increased total cost of ownership. In addition, the growing amount of data captured by sensors and connected devices, coupled with real-time constraints and the cost to move large data sets from the edge to the cloud, intensifies the need to manage and execute big data analytics and Machine Learning (ML) inference engines at the edge, wherever possible. + +“In order to accelerate innovation in machine intelligence on Arm, players in the Arm ecosystem need to collaborate,” said Andrea Gallo, VP of Segments and Strategic Initiatives at Linaro. “Through the Machine Intelligence Initiative, Linaro and members of the initiative aim to adopt a unified model description format and framework runtime API, an optimized inference engine for Arm application processors and a flexible plug-in architecture to integrate each NN solution and use members’ internal resources to focus on product competitive advantage.” + +Linaro’s Machine Intelligence Initiative will initially focus on inference for Arm Cortex®-A SoCs and Cortex-M MCUs running Linux, Android, and Zephyr, both for edge compute and smart devices. As part of the remit, the team will collaborate on defining an API and modular framework for an Arm runtime inference engine architecture based on plug-ins supporting dynamic modules and optimized shared Arm compute libraries. The work will rapidly develop to support a full range of processors, including CPUs, NPUs, GPUs, and DSPs and it is expected that the Arm NN will be a crucial part of this. + +Linaro expects to quickly expand the scope of this new initiative to include Cortex-M microcontrollers. + +“The TensorFlow team is excited to work with Arm and Linaro to expand support for edge devices, and we’re looking forward to integrating with the Arm NN library”, said Pete Warden, Technical lead of the TensorFlow mobile and embedded team at Google. “We think this kind of standard, open source interface for neural computing will improve the experience for product developers across the Arm ecosystem.” + +The Arm NN SDK, [announced in February 2018](https://www.arm.com/company/news/2018/02/arm-project-trillium-offers-the-industrys-most-scalable-versatile-ml-compute-platform), bridges the gap between existing neural network frameworks and power-efficient Arm Cortex CPUs, Arm Mali™ GPUs, or the Arm ML processor. It is a free-of-charge set of open-source Linux software and tools that enables machine learning workloads on power-efficient devices. It has reached a mature enough state that it can be deployed in production and others wish to make external contributions as an open source project managed by the community. + +In addition to working on reducing redundant software engineering, Linaro and its members are working on making the latest hardware available to developers in the 96Boards standard formats. Boards with processors accelerated for Machine Intelligence are already available from multiple vendors. + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. 
Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96Boards.org/). diff --git a/src/content/blogs/linaro-announces-lemaker-as-a-member-of-the-linaro-community-boards-group.mdx b/src/content/blogs/linaro-announces-lemaker-as-a-member-of-the-linaro-community-boards-group.mdx new file mode 100644 index 0000000..2924887 --- /dev/null +++ b/src/content/blogs/linaro-announces-lemaker-as-a-member-of-the-linaro-community-boards-group.mdx @@ -0,0 +1,42 @@ +--- +excerpt: LeMaker to pair its hardware expertise with Linaro’s open source + software experience to enable software developers with new 96Boards hardware + platforms +title: Linaro announces LeMaker as a member of the Linaro Community Boards Group +description: LeMaker to pair its hardware expertise with Linaro' open source + software experience to enable software developers with new 96Boards hardware + platforms +image: linaro-website/images/blog/96boards-specification-consumer-edition-v2 +author: shovan-sargunam +date: 2015-06-19T23:04:38.000Z +tags: [] +link: /news/linaro-announces-lemaker-as-a-member-of-the-linaro-community-boards-group/ +related: [] + +--- + +## LeMaker to pair its hardware expertise with Linaro’s open source software experience to enable software developers with new 96Boards hardware platforms + +Shen Zhen, China; 20 June 2015 + +Linaro Ltd, the not­-for-­profit engineering organization developing open source software for the Arm® architecture, today at the Shen Zhen Maker Faire announced that Chinese Maker development platform vendor LeMaker has joined Linaro as a member of the new Linaro Community Boards Group (LCG). + +The LCG has been formed to support the new 96Boards initiative. 96Boards is the first open hardware specification that provides a platform for the delivery of compatible low ­cost, small footprint 32­bit and 64­bit Cortex­A boards. Standardized expansion buses for peripheral I/O, display and cameras allow the hardware ecosystem to develop a range of compatible add­on products that will work on multiple vendor’s 96Boards products over the lifetime of the platform. The LCG Steering Committee will manage the evolution of the 96Boards open specifications and the development of the 96Boards community. The 96Boards website provides software downloads and updates, information on compatible products, and forums for software developers, makers and OEMs to get community software support and downloads for all 96Boards products. + +“The China Maker and University markets are an incredibly large and fertile ground for innovation,” said Joe Bates, EVP of Member Services at Linaro. “We are very happy to be working with LeMaker to provide these markets with the latest Armv8 hardware platforms at an affordable price with robust open source software support. 
We look forward to seeing what Chinese Makers and students can build on these platforms.”
+ +LeMaker has joined the LCG to combine its professional experience of working with multiple SoC vendors and its online community, which delivers innovative development platforms to Chinese universities and the Maker community, with Linaro’s software expertise, in order to deliver a new platform built on the latest Linux kernels and toolchains. This new platform is expected to be available in the fourth quarter of this year.
+ +“As a member of the Linaro Community Boards Group (LCG), LeMaker is pleased to participate in the marketing of the 96Boards in Chinese universities and the Maker market,” said Mr. Tony Zhang, Co-Founder of LeMaker. “Linaro is a key place for collaborative software and hardware engineering on the Arm architecture and we are proud of our long-standing work with the Arm ecosystem, enabling ‘LeMaker makes innovation easier’ for consumers around the world. LeMaker has developed the open-source platform for Banana Pro and today released three new products alongside this announcement. We believe in providing cost-effective, best-in-class platforms for new developers to be able to create a wider array of applications with our SBC platform.”
+ +**About Linaro**
+ +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 150 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors to Linux kernels since 3.10.
+ +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [https://www.linaro.org](/) and [http://www.96Boards.org](https://www.96boards.org/).
+ +**About LeMaker**
+ +LeMaker is a global pioneer in establishing the open source innovation ecosystem, guided by its motto “LeMaker makes innovation easier”. Since March 2014, with its powerful brand influence and great integration of resources, LeMaker has established stable partnerships with SoC firms and supply-chain companies. LeMaker has been empowering makers and people or firms with new ideas to always stand at the forefront of innovation, by providing people around the world with a comprehensive SBC platform and global online community service. LeMaker is committed to accelerating the development of open source and innovations.
+ +As used in this release, the term “LeMaker” refers to Shenzhen LeMaker Technology Co., Ltd.
For more information, please visit www.lemaker.org diff --git a/src/content/blogs/linaro-announces-lite-collaborative-software-engineering-internet-things-iot.mdx b/src/content/blogs/linaro-announces-lite-collaborative-software-engineering-internet-things-iot.mdx new file mode 100644 index 0000000..6511ffe --- /dev/null +++ b/src/content/blogs/linaro-announces-lite-collaborative-software-engineering-internet-things-iot.mdx @@ -0,0 +1,54 @@ +--- +title: Collaborative Software Engineering for the IOT +description: Linaro announces the launch of the Linaro IoT and Embedded (LITE) + Segment Group allowing industry leaders to collaborate. Read more here. +image: linaro-website/images/blog/27094831048_6ecb96f52a_o +tags: + - iot-embedded + - linaro-connect + - linux-kernel + - open-source +author: linaro +date: 2016-09-26T17:11:36.000Z +link: /news/linaro-announces-lite-collaborative-software-engineering-internet-things-iot/ +related: [] + +--- + +Cambridge, UK; 26 September 2016 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced the launch of the Linaro IoT and Embedded (LITE) Segment Group. Working in collaboration with industry leaders, LITE will focus on delivering end to end open source reference software for more secure connected products, ranging from sensors and connected controllers to smart devices and gateways, for the industrial and consumer markets. + +Industry interoperability of diverse, connected and secure IoT devices is a critical need to deliver on the promise of the IoT market. Today, product vendors are faced with a proliferation of choices for IoT device operating systems, security infrastructure, identification, communication, device management and cloud interfaces. Vendors in every part of the ecosystem are offering multiple choices and promoting competing standards. Linaro and the LITE members will work to reduce fragmentation in operating systems, middleware and cloud connectivity solutions, and will deliver open source device reference platforms to enable faster time to market, improved security and lower maintenance costs for connected products. + +Initial technical work will be focused on delivering an end to end, cross-vendor solution for secure IoT devices using the Arm Cortex®-M architecture. This will include a bootloader, RTOS platform, security, communications, middleware and a choice of application programming tools. LITE will also work on Cortex-A based smart device and gateway solutions for IoT using Linux. + +“Linaro has been very successful in hosting collaboration within the Arm ecosystem to reduce fragmentation and deliver new open source technology into multiple markets, from mobile and digital home to networking and the enterprise data center,” said George Grey, Linaro CEO. “We see an opportunity to apply the same skills to the rapidly emerging IoT software market, and we intend to work with our members to deliver reference open source software platforms that implement non-differentiating but critical features such as end to end security from the device to the cloud, over the air software updates, emerging IoT standards and protocols, and interfaces to global cloud service providers. This will enable product vendors to focus on their differentiation and value add.” + +Linaro has recently joined the Linux Foundation Zephyr Project as a Platinum member, alongside Intel, NXP® and Synopsys. 
LITE plans to use both Zephyr and Linux as neutral industry platforms for delivery of its collaborative engineering output. Under the direction of its Steering Committee, LITE will also work with Arm to extend and expand the Arm mbed™ IoT Device Platform and ecosystem. In addition, LITE will evaluate integration of other open source RTOS solutions and platforms from global Cloud service and device management providers. + +Founding members of LITE are Arm, Canonical, Huawei, NXP, RDA, Red Hat, Spreadtrum, STMicroelectronics, Texas Instruments and ZTE. Additional ecosystem semiconductor vendors, software companies, service providers and product manufacturers are expected to join LITE over the coming months. + +LITE is today releasing a preview of technology that will be delivered in the initial LITE IoT Reference Platform release to be made in December 2016. An end to end open source sensor solution running on multiple vendor SoCs will be demonstrated at the opening keynote of the Linaro Connect conference in Las Vegas, USA, and made available to the developer community at [www.96Boards.org/carbon](https://www.96boards.org/product/carbon/). + +“The success of the Arm ecosystem is built on choice and the work of our many partners,” said Charlene Marini, vice president of segment marketing, Arm. “Linaro has a proven track record in fostering collaboration on developing, optimizing and maintaining software solutions across a diverse range of applications. Linaro will apply those same successful principles to LITE to help rapidly mature the IoT software ecosystem in support of the Arm architecture.” + +“The Internet of Things is driving the next wave of innovation across devices and the cloud,” said Oliver Ries, Director of Engineering at Canonical. “Snap packages, the universal app packaging format for Linux, allow developers to distribute the same application on any device irrespective of operating system, from IoT gateways all the way to cloud server. Canonical will work with LITE to enable Snap-based Ubuntu Core support in the Linaro Reference Platforms, leading the way in reducing fragmentation and accelerating time to market for secure Arm devices." + +"Having an end-to-end secure and open source software stack is critical to realizing the full potential of IoT integration between sensor, client devices and the cloud,” said Zhong Youping, Huawei LiteOS OSDT Director. “Linaro has demonstrated its ability to bring together partners to solve large engineering challenges.” + +“As leading providers of MCUs for the growing IoT market and as founding members of Zephyr, we are very pleased to see LITE adopt Zephyr as an open source development platform. This Linaro-led open source collaboration will help accelerate Zephyr on Arm to become one of the leading, easy-to-use IoT platforms, designed specifically with security in mind for the connected world” said Robert Oshana, NXP’s Senior Director & Head of Microcontrollers Software R\&D. + +"Red Hat has long advocated the need for open collaboration and innovation around IoT, driven by open source communities that have the agility to respond to the emerging needs of large-scale, industrial IoT,” said Karen Farmer, Global IoT initiative leader, Red Hat. "We believe that the path to scalability is through standardization and we are extending our efforts within Linaro from Enterprise Group to become a founding member of LITE, helping to drive standards for the development of Linux-based, commercial IoT platforms running on Armv8-A processors. 
Additionally, Red Hat's expertise in platform development, middleware and security features will help to extend the Arm-based IoT ecosystem and enable IoT developers to deliver their open source-based solutions more rapidly." + +“IoT is all about disruption and we expect to see not only a plethora of new hardware solutions, but also new approaches to software,” said Dr. Leo Li, Chairman and CEO of Spreadtrum Communications. “In working closely with Linaro we have learnt the value of collaboration with our competitors and partners on non-differentiating technology and we feel that LITE membership will not only help our existing strategies, but also help us be prepared to benefit from the potential disruption in the industry.” + +“As a long-time Linaro partner, ST sees this new initiative as a strong step in achieving our joint effort with Linaro to define and deliver a reference open-source software platform that can run over the multiple RTOSes used by our customers as well as Arm mbed,” said Laurent Desseignes, Ecosystem Marketing Manager, STMicroelectronics. “The wide choice of products available across the STM32 families will facilitate broad adoption of the LITE software reference platform and speed up innovation for IoT applications.” + +“Fragmentation has been a feature of the embedded landscape since the beginning as many applications have been specialized and relatively low volume,” said Huang Yihua, ZTE Director of Strategy Planning Department. “As smarter embedded technology is used across a broader range of high volume applications, software portability, connectivity and maintenance become more and more important. We see Linaro and LITE as a neutral environment in which we can collaborate with our competitors and partners to develop non-differentiating open source software building blocks that will reduce fragmentation, deploy software quickly and accelerate innovation.” + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [www.linaro.org](/) and [http://www.96Boards.org](https://www.96boards.org/).
diff --git a/src/content/blogs/linaro-announces-marvell-founding-member-linaro-community-boards-group.mdx b/src/content/blogs/linaro-announces-marvell-founding-member-linaro-community-boards-group.mdx new file mode 100644 index 0000000..23da8b5 --- /dev/null +++ b/src/content/blogs/linaro-announces-marvell-founding-member-linaro-community-boards-group.mdx @@ -0,0 +1,41 @@ +--- +title: Linaro announces Marvell as a founding member of the Linaro Community + Boards Group +description: Marvell invests in enabling the open source development community + with the development of a new cost-effective quad-core Arm 64-bit development + board +image: linaro-website/images/blog/DataCenter +tags: + - linaro-connect + - linux-kernel + - open-source +author: linaro +date: 2015-02-09T12:58:09.000Z +link: /news/linaro-announces-marvell-founding-member-linaro-community-boards-group/ +related: [] + +--- + +## Marvell invests in enabling the open source development community with the development of a new cost-effective quad-core Arm 64-bit development board + +HONG KONG, China; 9 FEBRUARY 2015 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm® architecture, today at Linaro Connect Hong Kong 2015 announced that leading silicon solution provider Marvell Technology Group Ltd. (MRVL) has joined Linaro as a founding member of the new Linaro Community Boards Group (LCG). + +Marvell, a worldwide leader in providing complete silicon solutions from mobile communications to storage, Internet of Things (IoT), cloud infrastructure, digital entertainment and in-home content delivery and Kinoma® software, has joined the LCG to deliver a highly integrated quad-core 64-bit ARMADA® Mobile PXA1928 platform in an easily accessible format to software developers, OEMs and hobbyists around the world. + +The [ARMADA Mobile PXA1928 was announced](https://www.marvell.com/company/newsroom.html) in February 2014 and has been [named](https://web.archive.org/web/2019*/https://plus.google.com/+GoogleATAP/posts/CNa71nE6kfN) as one of the processors being used in the Google Project Ara application processor module reference designs. The multi-mode LTE System-on-Chip (SoC) is already running Linux and Android in various devices such as tablets and smartphones, and Marvell plans to release a [96Boards](https://www.96boards.org/) compatible platform based on this processor in the next few weeks. This will bring the functionality of these shipping devices onto the desks of developers where they can easily develop and optimize the operating systems and applications for both existing and new devices. + +“Linaro has been working closely with Marvell on Project Ara and we are excited that the ARMADA Mobile PXA1928 will be available to developers on an easily accessible board,” said George Grey, CEO of Linaro. “I’m very pleased to welcome Marvell as a founding member of the Linaro Community Boards Group and I look forward to the launch of their 96Boards compatible product in the next few weeks.” + +The LCG has been formed to support the new 96Boards initiative. 96Boards is the first open hardware specification that provides a platform for the delivery of compatible low-cost, small footprint 32-bit and 64-bit Cortex-A boards. Standardized expansion buses for peripheral I/O, display and cameras allow the hardware ecosystem to develop a range of compatible add-on products that will work on 96Boards products over the lifetime of the platform.
The LCG Steering Committee will manage the evolution of the 96Boards open specifications and the development of the 96Boards community. The 96Boards website provides software downloads and updates, information on compatible products, and forums for software developers, makers and OEMs to get community software support and downloads for all 96Boards products. + +“As a founding member of the Linaro Community Boards Group (LCG), Marvell is pleased to participate in the launch of the 96Boards initiative,” said Philip Poulidis, Vice President and General Manager, Internet of Things Business Unit at Marvell. “Linaro is a key place for collaborative software and hardware engineering on the Arm architecture and we are proud of our long-standing work with the Arm ecosystem, enabling the ‘Smart Life, Smart Lifestyle’ for consumers around the globe. We believe in providing cost-effective, best-in-class platforms for new developer communities to be able to create a wider array of applications with products such as the 64-bit quad core PXA1928.” + +**About Linaro** Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 150 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [www.linaro.org](/). + +**About Marvell** Marvell (NASDAQ: MRVL) is a global leader in providing complete silicon solutions and Kinoma® software enabling the “Smart Life and Smart Lifestyle.” From mobile communications to storage, Internet of Things (IoT), cloud infrastructure, digital entertainment and in-home content delivery, Marvell’s diverse product portfolio aligns complete platform designs with industry-leading performance, security, reliability and efficiency. At the core of the world’s most powerful consumer, network and enterprise systems, Marvell empowers partners and their customers to always stand at the forefront of innovation, performance and mass appeal. By providing people around the world with mobility and ease of access to services adding value to their social, private and work lives, Marvell is committed to enhancing the human experience. + +As used in this release, the term “Marvell” refers to Marvell Technology Group Ltd. and its subsidiaries. For more information, please visit [www.Marvell.com](https://www.marvell.com/).
diff --git a/src/content/blogs/linaro-announces-mediatek-member.mdx b/src/content/blogs/linaro-announces-mediatek-member.mdx new file mode 100644 index 0000000..bc10bbf --- /dev/null +++ b/src/content/blogs/linaro-announces-mediatek-member.mdx @@ -0,0 +1,39 @@ +--- +author: linaro +date: 2014-03-17T14:05:49.000Z +description: Wireless technology leader MediaTek builds on open source footprint + by joining Linaro +link: /news/linaro-announces-mediatek-member/ +title: Linaro announces MediaTek as member +tags: [] +related: [] + +--- + +## Wireless technology leader MediaTek builds on open source footprint by joining Linaro + +CAMBRIDGE, UK - 17 MAR 2014 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced that MediaTek has joined Linaro as a member. + +MediaTek is a leading worldwide designer of systems on a chip (SoCs) for mobile and home entertainment products. Arm-based processors are widely used in MediaTek platforms, including the company’s new 64-bit octa-core MT6752 with LTE. This 64-bit architecture is central to its strategy for continued expansion and success in global markets. + +Linaro membership will allow MediaTek to further accelerate the time-to-market for its technology partners. MediaTek brings a wealth of experience to Linaro, both from its long history of developing highly integrated SoCs for a range of applications as well as its deep expertise in innovative cross-platform solutions. + +“MediaTek is an innovative and exciting company in the Arm ecosystem,” said George Grey, Linaro CEO. “We are very pleased that MediaTek has decided to join Linaro, and we look forward to working closely with them on accelerating open source support for new Arm technologies, such as the 64-bit Armv8 processor architecture.” + +“MediaTek has been a long-time partner of Arm and is supportive of open source communities like Linaro. We’re delighted to have this opportunity to collaborate with other industry leaders to drive open source innovations,” said Kevin Jou, MediaTek’s Chief Technology Officer. + +MediaTek engineers fully participated last week in Linaro Connect Asia 2014 (LCA14) in Macau. This event, held twice each year, brings together Linaro members and community engineers from around the world to discuss, define and agree on Linaro's roadmaps across its work in core Arm enablement, as well as on the software ecosystem for mobile, networking and server market segments. + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of open source on Arm. The company is a not-for-profit engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. For more information about Linaro, visit [www.linaro.org](/). + +**About MediaTek** + +MediaTek is a pioneering fabless semiconductor company, and a market leader in cutting-edge systems on a chip for wireless communications, HDTV, DVD and Blu-ray.
MediaTek created the world's first octa-core smartphone platform with LTE and its CorePilot™ technology released the full power of multi-core mobile processors. MediaTek \[TSE:2454] is headquartered in Taiwan and has offices worldwide. Please visit [www.mediatek.com](http://www.mediatek.com/) for more information. +Press office: PR@mediatek.com ++1 650-391-5514 diff --git a/src/content/blogs/linaro-announces-opendataplane-tigermoth.mdx b/src/content/blogs/linaro-announces-opendataplane-tigermoth.mdx new file mode 100644 index 0000000..8c1e002 --- /dev/null +++ b/src/content/blogs/linaro-announces-opendataplane-tigermoth.mdx @@ -0,0 +1,153 @@ +--- +author: linaro +date: 2018-03-19T00:00:00.000Z +title: Linaro Announces OpenDataPlane Tiger Moth LTS Software +description: Linaro Ltd, the open source collaborative engineering organization + developing software for the Arm ecosystem, today announced the availability of + the second Long Term Support (LTS) release of OpenDataPlane +published: true +tags: + - linaro-connect +keywords: Linaro, Connect, HKG18, OpenDataPlane, New Release, LTS, Tiger Moth, Software +related: [] + +--- + +## Linaro Announces OpenDataPlane Tiger Moth LTS Software Release with Support for Arm and x86 + +**Software Defined Data Plane Supports SoCs, SmartNICs, and Servers** +**With Full Support for Accelerated IPsec Processing** + +\[Hong Kong, China, 19 March 2018] Linaro Ltd, the open source collaborative engineering +organization developing software for the Arm ecosystem, today announced the availability of the +second Long Term Support (LTS) release of OpenDataPlane®, code-named “Tiger Moth”. The +OpenDataPlane project is an open-source, cross-platform set of application programming +interfaces (APIs) for the networking Software Defined Data Plane. + +OpenDataPlane has been optimized to take advantage of Arm-based SoC (System on Chip) +processors that provide a very high level of integration of high performance network interfaces +and hardware packet and crypto accelerators. Another benefit is that OpenDataPlane supports +the ability to run the exact same software code base on both x86 and Arm-based server class +processors using standard NICs or SmartNICs that utilize DPDK for input and output. + +OpenDataPlane acts as a standard unifying architecture that allows true “write once, accelerate +anywhere” applications to take best advantage of polled or event-driven network architectures +and hardware accelerators with little to no effort. Now OEMs and software developers can more +easily take advantage of new and improved silicon without fork-lift upgrades to their +applications. + +Reference implementations of OpenDataPlane provided by the Linaro Networking Group (LNG) +as well as production-grade implementations provided by various silicon partners are available. +The Linaro components, consisting of the API Specification, Reference Implementations, and +Validation Test Suite, can be found on GitHub (https://github.com/Linaro/odp) and via the +OpenDataPlane web site (https://opendataplane.org/downloads/). Links to performance-optimized +native Arm-based SoC implementations from LNG member companies, including +Cavium™, are available at the same location on the ODP web site. + +“This new OpenDataPlane ‘Tiger Moth’ LTS release provides real world applications a stable +and optimized set of APIs that run on accelerated Arm-based SoCs, Arm-based and x86 +servers, and SmartNICs.
This release includes full support for IPsec offload to meet the needs +for line-rate secure communication, as well as many other advancements over the previous +‘Monarch’ LTS release,” said François-Frédéric Ozog, LNG Director at Linaro. +OpenDataPlane Features include: + +* Common Application API across Arm and x86 + +* Support for Inline IPsec (little to no involvement of processor cores) + +* Support for IPsec Lookaside processing + +* Support for Arm-based SoCs + +* Support for Arm-based Servers + +* Support for SmartNICs + +* Support for x86 Servers + +* Support for Hardware Packet Accelerators including + * Buffer/Packet managers + * Packet Parsers and Classifiers + * Packet ordering engines + * Integrated cryptographic processing + * Integrated I/O + +* Support for both Hardware and Software Schedulers and Load Balancers + +* Support for both Hardware and Software Traffic Managers + +* Support for FAT IPsec pipes - ability to load balance traffic across multiple cores + +* Support for DPDK on Arm and x86 + +* Full Validation Test Suites available + +* Embedded and Cloud Ready + +* Production-grade implementations available + +OpenDataPlane is developed jointly by LNG members and the wider open source community to +represent the interests of application developers, silicon vendors, telecom equipment +manufacturers and software solution providers, and has been validated on both Arm and +x86-based systems. + +> "The Tiger Moth release of ODP highlights the power of collaboration between Arm and Linaro +> to deliver innovative networking infrastructure solutions," +> said Mark Hambleton, senior director, Open Source Software, Arm. +> "The ability to bring forward the capabilities of unique solutions +> from our partners, while delivering truly cross-platform portability across a broad range of +> offerings, is a key value of the Arm ecosystem." + +“OpenDataPlane (ODP) continues to increase its value. With this release that supports +Arm-based SoCs, Arm-based Servers, and even DPDK on x86 Servers, Linaro and all of the Linaro +Networking Group members are again actively demonstrating the real benefits of our collaborative +engineering efforts,” said Larry Wikelius, Vice President Software Ecosystem and Solutions +Group at Cavium, Inc. “ODP is an outstanding proof point for the value of standard interfaces +that allow Armv8-based SoC and server vendors to showcase differentiating performance and +features while still supporting leading software applications. Cavium is proud to continue its +tradition of open source community leadership and intends to deliver Tiger Moth across the +range of ThunderX®, ThunderX2® and OCTEON TX® product families.” + +Enea has been a member of LNG and contributor to the ODP project for several years, and the +OpenDataPlane cross-platform API is today supported and leveraged by Enea’s own data plane +solutions. “The Tiger Moth release represents a new significant milestone for the +OpenDataPlane project at large, and is a key platform for our continued work with accelerated +data plane and OS solutions across a broad range of CPU architectures, including both Arm +and Intel-based hardware platforms,” said Adrian Leufvén, SVP OS Business Unit, Enea. +The Tiger Moth software release supports Nokia’s end-to-end Future X vision for 5G and the +silicon advances made with its recently announced ReefShark chipset family. Tiger Moth +supports critical features of 5G such as low latency and high throughput.
“Tiger Moth-based +system software is an essential part of Nokia ReefShark and Nokia’s Future X realization,” said +Jarmo Hillo, Processor Technology Lead at Nokia Networks. + +OpenDataPlane has been a key component of the OpenFastPath (OFP) project from the start. +The event-driven architecture of OFP is derived directly from OpenDataPlane, which also +provides the former project with the necessary cross-platform portability. + +“OFP’s future roadmap will take full advantage of the Tiger Moth release. OpenDataPlane +provides the necessary platform for event-driven packet processing, and we are excited to +continue our work in close collaboration with Linaro,” said Daniel Forsgren, President of the +OpenFastPath Foundation. + +## About LNG + +The Linaro Networking Group (LNG) was founded in February 2013 by twelve member +companies. The OpenDataPlane project was established from the start of LNG to produce an +open-source, cross-platform application programming interface (API) for the networking data +plane that offers both portability and automatic access to vendor-optimized platform +acceleration capabilities, as well as linear scalability for applications deployed in many-core +system environments. + +## About Linaro + +Linaro is leading collaboration on open source development in the Arm ecosystem. The +company has over 300 engineers working on consolidating and optimizing open source +software for the Arm architecture, including developer tools, the Linux kernel, Arm power +management, and other software infrastructure. Linaro is distribution neutral: it wants to provide +the best software foundations to everyone by working upstream, and to reduce +non-differentiating and costly low-level fragmentation. The effectiveness of the Linaro approach has +been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as +one of the top five company contributors, worldwide, to Linux kernels since 3.10. +To ensure commercial quality software, Linaro’s work includes comprehensive test and +validation on member hardware platforms. The full scope of Linaro engineering work is open to +all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96Boards.org/). diff --git a/src/content/blogs/linaro-announces-qualcomm-as-the-latest-industry-leader-to-become-a-member.mdx b/src/content/blogs/linaro-announces-qualcomm-as-the-latest-industry-leader-to-become-a-member.mdx new file mode 100644 index 0000000..9d4338b --- /dev/null +++ b/src/content/blogs/linaro-announces-qualcomm-as-the-latest-industry-leader-to-become-a-member.mdx @@ -0,0 +1,42 @@ +--- +title: Linaro Announces Qualcomm as the latest Member +description: Qualcomm, a wireless technology leader, builds on open source + footprint by joining Linaro as a club member. Read more here. +image: linaro-website/images/blog/30921188158_953bca1c9f_k +author: linaro +date: 2014-02-20T12:04:06.000Z +link: /news/linaro-announces-qualcomm-as-the-latest-industry-leader-to-become-a-member/ +tags: [] +related: [] + +--- + +## Wireless technology leader Qualcomm builds on open source footprint by joining Linaro + +CAMBRIDGE, UK - 20 FEB 2014 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced that Qualcomm Innovation Center, Inc. (QuIC) has joined Linaro as a Club Member.
Linaro now has a total of 25 member companies working together to accelerate open source software development for the Arm architecture. + +As the Arm architecture advances rapidly with the delivery of 64-bit low power processor cores, the benefits to be gained from collaboration on common open source software across the industry increase. Linaro’s goals are to enable more rapid innovation in the industry through using shared resources to engineer common software elements, enabling each member to focus more of their own resources on product differentiation. + +QuIC is a wholly owned subsidiary of Qualcomm Incorporated, and it not only brings a history of innovation in wireless technology into Linaro, but also adds its extensive open source development experience to the 200+ open source software engineers already working in Linaro to accelerate the delivery of open source software across the industry. + +The major difference between Linaro and other collaborative open source organizations is that Linaro focuses on engineering and implementation rather than the agreement on and definition of standards. Linaro and QuIC expect to derive mutual benefit from working together through a shared focus on open source software. + +“We are very pleased to welcome Qualcomm as a new member of Linaro,” said George Grey, Linaro CEO. “There is clear synergy between QuIC’s open source work for Qualcomm products and Linaro’s open source engineering for the Arm architecture. We look forward to delivering high ROI to Qualcomm through shared engineering across the Linaro membership.” + +Jason Bremner, Senior Vice President of Product Management for Qualcomm, said, “Qualcomm has a rich history of supporting and contributing to the open source community since the launch of the first Android device in 2007. Through becoming a member of Linaro we have an opportunity to extend this commitment into new areas and further drive open source innovation through industry collaboration.” + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of open source on Arm. The company is a not-for-profit engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top three company contributors to recent Linux kernels (LWN lists Linaro as the number 2 company contributor to kernels 3.12 and 3.13 and #3 to kernels 3.10 and 3.11: [http://lwn.net/Articles/579081/](http://lwn.net/Articles/579081/), [http://lwn.net/Articles/570483/](http://lwn.net/Articles/570483/), [http://lwn.net/Articles/563977/](http://lwn.net/Articles/563977/), [http://lwn.net/Articles/555968/](http://lwn.net/Articles/555968/)). + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. + +**About Qualcomm** + +Qualcomm Incorporated (NASDAQ: QCOM) is the world leader in 3G, 4G and next-generation wireless technologies.
Qualcomm Incorporated includes Qualcomm’s licensing business, QTL, and the vast majority of its patent portfolio. Qualcomm Technologies, Inc., a wholly-owned subsidiary of Qualcomm Incorporated, operates, along with its subsidiaries, substantially all of Qualcomm’s engineering, research and development functions, and substantially all of its products and services businesses, including its semiconductor business, QCT. For more than 25 years, Qualcomm ideas and inventions have driven the evolution of digital communications, linking people everywhere more closely to information, entertainment and each other. For more information, visit Qualcomm’s [website](http://www.qualcomm.com/), [OnQ blog](http://www.qualcomm.com/media/blog), [Twitter](https://twitter.com/Qualcomm) and [Facebook](https://www.facebook.com/qualcomm) pages. + +For more information on the company, access to software and tools, and information on the community and open engineering, visit [www.linaro.org](/) diff --git a/src/content/blogs/linaro-announces-software-reference-platform-arm-servers.mdx b/src/content/blogs/linaro-announces-software-reference-platform-arm-servers.mdx new file mode 100644 index 0000000..76ce61a --- /dev/null +++ b/src/content/blogs/linaro-announces-software-reference-platform-arm-servers.mdx @@ -0,0 +1,39 @@ +--- +excerpt: Linaro announces a complete open source Software Reference Platform for + servers running on Armv8-A processors. +title: Linaro announces Software Reference Platform for Arm servers +description: Linaro announces a complete open source Software Reference Platform + for servers running on Armv8-A processors. +image: linaro-website/images/blog/Datacenter +author: linaro +date: 2016-01-27T14:00:24.000Z +tags: + - linux-kernel + - open-source + - datacenter +link: /news/linaro-announces-software-reference-platform-arm-servers/ +related: [] + +--- + +Cambridge, UK; 27 January 2016 + +Linaro Ltd, the collaborative engineering organization developing software for the Arm® architecture, today announced a complete open source Software Reference Platform for servers running on Armv8-A processors. Linaro released alpha source code on 23 December 2015, and plans to partner with its members to provide access to the Software Reference Platform running on developer cloud instances in Europe, China and the US in the first half of 2016. + +This launch makes available, for the first time, a complete end to end open source server software stack for servers and cloud providers, and will also provide developers worldwide with access to enterprise-class Arm-based server hardware. + +The Reference Software Platform releases are expected to be used by Linaro members and the wider community for enterprise products and cloud instance development and deployment. Releases will be provided for different market segments, and early access to the Linaro Enterprise Group (LEG) and Linaro Mobile Group (LMG) builds is now available. The alpha release of the LEG build offers a complete reference implementation for Arm servers, including open source boot software and firmware implementing the Arm Trusted Firmware, UEFI and ACPI standards, a Linux 4.4 kernel, the latest tested Debian and CentOS distributions, OpenStack, OpenJDK, Hadoop and Spark. + +During 2016 the Linaro Software Reference Platform releases will support an increasing range of data center, networking and home gateway applications. These releases will provide market segment specific application stacks for end to end use case development.
Since the launch of Arm-based server SoCs in 2014, products have developed rapidly and servers are already being used in a number of commercial organizations, government institutes and higher education establishments. The new developer cloud instances being planned by Linaro and ecosystem partners will make access to Arm enterprise computing available to a broader audience who want to develop for this new technology without upfront hardware investment. Linaro has offered bare metal access to Arm servers in a limited colocation facility for the last year. The organization has been using this to help develop a complete cloud stack that will include a management interface and support for virtualization and data analytics. + +The initial launch of the developer cloud instances will happen on existing Arm-powered server hardware. As 96Boards Enterprise Edition boards are released, Linaro will also use them as a standardized hardware platform to test the Reference Software Platform and provide developers with a lower cost option to install their own local developer cloud or single server instance. + +“We have released the Linaro Enterprise Group (LEG) Reference Software Platform build to help accelerate the testing and deployment of the Arm architecture in data centers,” said Ricardo Salveti, Engineering Manager for the Software Reference Platform. “This will be the first time that all the foundational software components required to run a server on the Armv8-A architecture have been brought together, tested and released openly.” + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [www.linaro.org](/) and [http://www.96Boards.org](https://www.96boards.org/).
diff --git a/src/content/blogs/linaro-announces-spreadtrum-communications-latest-club-member.mdx b/src/content/blogs/linaro-announces-spreadtrum-communications-latest-club-member.mdx new file mode 100644 index 0000000..779a61c --- /dev/null +++ b/src/content/blogs/linaro-announces-spreadtrum-communications-latest-club-member.mdx @@ -0,0 +1,40 @@ +--- +author: linaro +date: 2014-10-29T16:54:06.000Z +description: Leading China fabless semiconductor company Spreadtrum + Communications joins Linaro to influence development direction of open source + software +excerpt: Leading China fabless semiconductor company Spreadtrum Communications + joins Linaro to influence development direction of open source software +link: /news/linaro-announces-spreadtrum-communications-latest-club-member/ +tags: [] +title: Linaro announces Spreadtrum Communications as latest Club Member +related: [] + +--- + +## Leading China fabless semiconductor company Spreadtrum Communications joins Linaro to influence development direction of open source software + +CAMBRIDGE, UK – 29 OCTOBER 2014 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced that Spreadtrum Communications (Shanghai) Co., Ltd. has joined Linaro as a Club Member. + +Spreadtrum has been at the forefront of bringing competitively priced mobile platforms based on Android operating systems to the market since 2012, and most recently has worked with customers on the launch of sub-US$40 Firefox OS smartphones in India and other countries. The company offers mobile smartphone platforms based on Arm’s Cortex-A processors. + +“We are very happy to join the Linaro community and to collaborate with other companies on driving software innovation in mobile platforms,” said Xiaomao Xiao, VP of Software Platform at Spreadtrum. “Spreadtrum has worked with open source software for many years and we can clearly see the advantage of taking an active role in the open source communities, working to accelerate development on a range of platforms.” + +The addition of Spreadtrum to the Linaro community brings membership of the collaborative engineering organization to a total of thirty companies, including other SoC suppliers, software vendors, OEMs and end users. The diversity of the membership and the focus of discussions and development efforts on core, mobile, networking, home and server software engineering result in collaboration between expert participants from the world’s most innovative companies, driven by input from key end users in the relevant market segments. + +“I welcome Spreadtrum to Linaro as a Club Member. Their experience in mobile chipset platforms will be a valuable contribution to our technical and strategic discussions,” said Joe Bates, EVP of Member Services at Linaro. “The China ecosystem is increasingly active in defining the development direction for open source software on Arm processors and I’m very happy that Spreadtrum wants to play a leadership role in industry collaboration through Linaro.” + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of open source on Arm. The company is a not-for-profit engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure.
Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. For more information about Linaro, visit [www.linaro.org](/). + +**About Spreadtrum Communications (Shanghai) Co., Ltd.** + +Spreadtrum Communications (Shanghai) Co., Ltd. (“Spreadtrum”) is a fabless semiconductor company that develops mobile chipset platforms for smart phones, feature phones and other consumer electronics products, supporting 2G, 3G and 4G wireless communications standards. Spreadtrum’s solutions combine its highly integrated, power-efficient chipsets with customizable software and reference designs in a complete turnkey platform, enabling customers to achieve faster design cycles with a lower development cost. Spreadtrum’s customers include global and China-based manufacturers developing mobile products for consumers in China and emerging markets around the world. Spreadtrum Communications, Inc. is a privately held company headquartered in Shanghai and an affiliate of Tsinghua Unigroup, Ltd. + +For more information, please visit www.spreadtrum.com diff --git a/src/content/blogs/linaro-announces-support-for-96boards-hikey-in-aosp.mdx b/src/content/blogs/linaro-announces-support-for-96boards-hikey-in-aosp.mdx new file mode 100644 index 0000000..e345074 --- /dev/null +++ b/src/content/blogs/linaro-announces-support-for-96boards-hikey-in-aosp.mdx @@ -0,0 +1,34 @@ +--- +excerpt: Linaro today announced that the 96Boards HiKey octa-core 64-bit Armv8 + community board is supported in AOSP (Android Open Source Project). Developers + can now download source code from AOSP and create a working build without the + need to pull patches from any other repository. +title: Linaro announces support for 96Boards HiKey in AOSP +description: Linaro today announced that the 96Boards HiKey octa-core 64-bit + Armv8 community board is supported in AOSP (Android Open Source Project). + Developers can now download source code from AOSP and create a working build + without the need to pull patches from any other repository. +image: linaro-website/images/blog/96boards-specification-consumer-edition-v2 +author: linaro +date: 2016-03-07T10:58:22.000Z +tags: + - android +link: /news/linaro-announces-support-for-96boards-hikey-in-aosp/ +related: [] + +--- + +Cambridge, UK; 7 March 2016 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that the 96Boards HiKey octa-core 64-bit Armv8 community board is supported in AOSP (Android Open Source Project). Developers can now download source code from AOSP and create a working build without the need to pull patches from any other repository. + +Linaro has worked on AOSP contributions and toolchain optimizations since its establishment in 2010. Over the last five years, Linaro has provided various builds of AOSP for member hardware, but these have all been maintained outside of the AOSP common tree. Moving forward, for existing and future versions of Android, Linaro is simplifying its provision of builds and focusing its efforts on getting full support and ongoing maintenance for member hardware into AOSP. Support for the 96Boards HiKey is available now and we expect additional hardware platform support will follow.
“One of the challenges of developing on AOSP has been the lack of a developer-friendly platform combining community hardware with an open source software stack,” said Tom Gall, Director of the Linaro Mobile Group (LMG). “We’re very happy to have had support for the 96Boards HiKey accepted into the AOSP common tree and look forward to enabling developers.” + +Information about the HiKey board and Running Android on HiKey are available here: [http://source.android.com/source/devices.html](http://source.android.com/source/devices.html). Linaro is providing instructions for developers here: [http://linaro.co/hikey-start](https://github.com/96boards/documentation/wiki/HiKeyGettingStarted#aosp-build-from-source) and the hardware platform can be purchased from [Seeed](https://www.seeedstudio.com/). A video of Linaro CEO George Grey’s keynote at Linaro Connect, including this announcement, can be found here: [http://linaro.co/bkk16-keynote](https://resources.linaro.org/en/resource/fK98dBxFbbMEiEkxAm2Hcn) + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/linaro-announces-zte-latest-club-member.mdx b/src/content/blogs/linaro-announces-zte-latest-club-member.mdx new file mode 100644 index 0000000..42319d2 --- /dev/null +++ b/src/content/blogs/linaro-announces-zte-latest-club-member.mdx @@ -0,0 +1,35 @@ +--- +author: linaro +date: 2014-03-18T14:06:23.000Z +description: Telecoms and networking leader ZTE joins Linaro to promote global + development of open source software +link: /news/linaro-announces-zte-latest-club-member/ +title: Linaro announces ZTE as latest Club Member +tags: [] +related: [] + +--- + +## Telecoms and networking leader ZTE joins Linaro to promote global development of open source software + +CAMBRIDGE, UK - 18 MAR 2014 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced that ZTE Corporation has joined Linaro as a Club Member. + +ZTE Corporation (“ZTE”) (H share stock code: 0763.HK / A share stock code: 000063.SZ) is a publicly-listed global provider of telecommunications equipment, network solutions and mobile devices. ZTE has joined Linaro at the same time as three other mobile industry leaders, bringing membership of the not-for-profit organization to a total of 29 companies. + +“I welcome ZTE joining Linaro as a club member. Their experience in wireless and networking will help us in our technical and strategic discussions,” said David Rusling, Linaro CTO.
“At the recent Linaro Connect in Macau, we started to work together on new technologies, built around the 64-bit Armv8-A architecture as well as initiatives such as Linaro's Long-term Supported Kernel (LSK). LSK is a key delivery mechanism for both Armv7-A and Armv8-A. As well as supporting operating systems such as Android and Firefox OS, LSK allows us to pull together and demonstrate advanced kernel features that are needed for products now and in the future.” + +ZTE has a strong track record demonstrating commitment to open source technology development. As early as 2002 – six years before the first Android smartphone – ZTE introduced a Linux-based smartphone and it was one of the early members of Google’s Open Handset Alliance (OHA). The company is also a long-term member of the Linux Foundation, receiving Carrier Grade Linux (CGL) 5.0 accreditation in 2012. + +“We are honored to become a Club Member of Linaro,” said Jill Guo, Director of Strategic Partnerships at ZTE. “ZTE is committed to working with Linaro and other open source communities to accelerate the development of technologies on the Arm platform. The collaboration with Linaro will cover the development of a comprehensive range of ZTE products, including mobile devices, servers, Core Network products, Internet Protocol Television and SmartHome.” + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of open source on Arm. The company is a not-for-profit engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. For more information about Linaro, visit [www.linaro.org](/). + +**About ZTE** + +ZTE is a publicly-listed global provider of telecommunications equipment and network solutions with the most comprehensive product range covering virtually every telecommunications sector, including wireless, access & bearer, VAS, terminals and professional services. The company delivers innovative, custom-made products and services to over 500 operators in more than 160 countries, helping them to meet the changing needs of their customers while growing revenue. ZTE commits 10 per cent of its annual revenue to research and development and has leadership roles in several international bodies devoted to developing telecommunications industry standards. ZTE is committed to corporate social responsibility and is a member of the UN Global Compact. The company is China’s only listed telecom manufacturer that is publicly traded on both the Hong Kong and Shenzhen Stock Exchanges (H share stock code: 0763.HK / A share stock code: 000063.SZ). For more information, please visit https://www.zte.com.cn/global/about/corporate\_information/Introduction.
diff --git a/src/content/blogs/linaro-appoints-guy-berruyer-chairman.mdx b/src/content/blogs/linaro-appoints-guy-berruyer-chairman.mdx new file mode 100644 index 0000000..f9b3f95 --- /dev/null +++ b/src/content/blogs/linaro-appoints-guy-berruyer-chairman.mdx @@ -0,0 +1,33 @@ +--- +excerpt: Linaro announced that Guy Berruyer has been appointed as Chairman of + the Linaro Board. +title: Linaro appoints Guy Berruyer Chairman +description: Linaro announced that Guy Berruyer has been appointed as Chairman + of the Linaro Board. +image: linaro-website/images/blog/30921188158_953bca1c9f_k +author: linaro +date: 2015-10-27T13:07:00.000Z +tags: + - linux-kernel + - open-source +link: /news/linaro-appoints-guy-berruyer-chairman/ +related: [] + +--- + +Cambridge, UK; October 27, 2015 + +Linaro Ltd, the collaborative engineering organisation developing open source software for the Arm® architecture, today announced that Guy Berruyer has been appointed to the position of Chairman of the [Linaro Board](/about/team/). + +Mr Berruyer, formerly CEO of Sage Group Plc, a FTSE 100 multi-national software company, assumed the post of Chairman at the Linaro Board meeting held on Thursday 8 October. + +“Linaro has seen significant growth since its founding in 2010 and we are constantly encountering new opportunities and challenges,” said George Grey, Linaro CEO. “I look forward to working with Guy to build on and accelerate this growth to realise Linaro’s full long-term potential.” + +“Linaro has a strong track record, unique engineering organisation, major contribution to open source software for the Arm architecture, and innovative business model that are generating a number of very interesting growth opportunities,” said Guy Berruyer, Linaro Board Chairman. “I’m excited to have this opportunity to help guide the Linaro executive team to drive the company forward in its goal to bring even more business success to the Linaro members.” + +Mr Berruyer, a French national with a degree in Electrical Engineering and an MBA from Harvard Business School, has operational and M\&A experience across all continents. He joined Sage as CEO France in 1997 and was appointed to the Sage Group Board in January 2000 and to the position of CEO in 2010, which he held until retiring in November 2014. His early [career](https://www.linaro.org/careers/) was spent with US and European software and hardware vendors in Sales, Marketing and general management roles. He has held a number of directorships and is currently a non-executive director on the Board of Meggitt PLC and a member of the University of Southampton’s governing body, the University Council. + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms.
The full scope of Linaro engineering work is open to all online. To find out more, please visit [www.linaro.org](/) and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/linaro-appoints-jill-guo-to-lead-greater-china-operations.mdx b/src/content/blogs/linaro-appoints-jill-guo-to-lead-greater-china-operations.mdx new file mode 100644 index 0000000..bba29ef --- /dev/null +++ b/src/content/blogs/linaro-appoints-jill-guo-to-lead-greater-china-operations.mdx @@ -0,0 +1,35 @@ +--- +excerpt: Linaro announced that Jill Guo has been appointed as the Executive Vice + President to head up the company’s growing Greater China operations. +title: Jill Guo Appointed as Lead of Greater China Operations +description: Linaro announces that Jill Guo has been appointed as the Executive + Vice President to head up the company's Greater China operations. Read more + here. +image: linaro-website/images/blog/PR_96Boards_banner_pic +author: linaro +date: 2015-10-20T14:25:54.000Z +tags: + - linux-kernel + - open-source +link: /news/linaro-appoints-jill-guo-to-lead-greater-china-operations/ +related: [] + +--- + +Cambridge, UK; October 20, 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that Jill Guo has been appointed as the Executive Vice President to head up the company’s growing Greater China operations. + +Ms Guo reports directly to CEO George Grey and is a member of [Linaro’s Executive Management team](/about/team/). “Greater China’s role in the world as both a producer and consumer of technology continues to grow in importance,” said George Grey, Linaro CEO. “The appointment of Jill Guo to our Executive team clearly demonstrates our commitment to invest in the region and help local companies benefit from the advantages of collaborative engineering and open source software.” + +Although none of Linaro’s six founding companies were based in Greater China, the region has contributed key engineers to Linaro since the beginning and membership in the region has grown rapidly over the last three years. Nine of Linaro’s thirty-five members are now from Greater China, including Linaro Core member HiSilicon, and Club members MediaTek, Spreadtrum and ZTE. This growth is expected to continue and the Linaro Greater China organization has plans to expand accordingly to support this development in local membership. + +“I’m excited to develop my relationship with Linaro from my previous role as a partner to now working for Linaro to help lead the company’s development in its most rapidly developing region,” said Jill Guo, Linaro EVP of Greater China. “There are many opportunities for Greater China to benefit from open source software. Cooperative engineering in Linaro on non-differentiating technology provides instant access to experienced open source engineers and Linaro is a unique entry point into these opportunities.” + +Ms Guo came to her first Linaro Connect in Hong Kong in 2013 when she was the Strategic Planning Director in ZTE Corporation’s Mobile Division Product Development Department. Prior to Linaro, Jill worked for ZTE for over 15 years in a variety of roles covering product marketing management, industrial supply-chain and ecosystem partnership management, ODM and outsourcing management, and mobile handset business research. + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem.
The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [www.linaro.org](/) and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/linaro-appoints-mark-orvek-post-vp-engineering.mdx b/src/content/blogs/linaro-appoints-mark-orvek-post-vp-engineering.mdx new file mode 100644 index 0000000..a5761f7 --- /dev/null +++ b/src/content/blogs/linaro-appoints-mark-orvek-post-vp-engineering.mdx @@ -0,0 +1,32 @@ +--- +author: linaro +date: 2013-03-21T12:25:02.000Z +description: CAMBRIDGE, UK - 21 MAR 2013 +link: /news/linaro-appoints-mark-orvek-post-vp-engineering/ +title: Linaro appoints Mark Orvek to post of VP of Engineering +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 21 MAR 2013 + +Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, today announced the appointment of Mark Orvek as VP of Engineering. + +![Mark Orvek - VP of Engineering at Linaro](/linaro-website/images/blog/mark-orvek) + +The announcement was made during the opening keynote at Linaro Connect Asia 2013 in Hong Kong by George Grey, Linaro CEO. He said "We are very pleased to appoint Mark to the Vice President of Engineering role. With his extensive previous work in open source software at MontaVista, he brings a strong industry background and experience in high quality software delivery to the Linaro team." + +The Vice President of Engineering is responsible for leading the Linaro engineering team, consisting of over 150 Linaro and member open source engineers. The team consists of Arm toolchain, kernel, power management and graphics/multimedia working groups as well as a platform team responsible for test, validation and upstream code delivery. In addition, the Linaro Enterprise Group (LEG) works on Arm Server ecosystem software, and the recently formed Linaro Networking Group (LNG) works on the software ecosystem for network communications equipment. + +"We are experiencing tremendous change and innovation with the Arm architecture and Linaro is well positioned as a key contributor to the rapidly evolving landscape. The engineering team is fantastic and represents some of the very best engineers working in the Linux kernel and associated technologies. This is a great time to be at Linaro!" said Mark Orvek, Linaro VP of Engineering. + +Mark joined Linaro as Director of Working Groups in August 2012. Prior to this he was Vice President of Engineering and Services at MontaVista Software. He joined MontaVista in 1999 as the director of engineering, responsible for new product development focused on making the Linux operating system suitable for embedded systems and embedded applications.
He managed the development of key technologies for Linux including multi-architecture platform support, real-time, carrier grade high availability, small footprint, fast boot, power management and most recently the MontaVista Linux 6 Integration Platform. Before MontaVista, he was an R\&D Section Manager at Hewlett-Packard (HP), holding various positions including technical field support, development engineer, R\&D Project Manager and R\&D Section Manager in HP’s business servers and real-time embedded board computers. Mark holds BS degrees in Electrical Engineering from Rochester Institute of Technology and in Computer Science from California State University at Dominguez Hills. + +[Hi-res images of Mark are available here.](/assets/images/people/Mark_Orvek_High_Res.jpg) + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 140 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [www.linaro.org](/). diff --git a/src/content/blogs/linaro-at-elc-2011.mdx b/src/content/blogs/linaro-at-elc-2011.mdx new file mode 100644 index 0000000..f60e1fe --- /dev/null +++ b/src/content/blogs/linaro-at-elc-2011.mdx @@ -0,0 +1,46 @@ +--- +title: Linaro at the Embedded Linux Conference +description: Overview of talks given by Linaro engineers at the 2011 edition of + the Embedded Linux Conference in San Francisco. +image: linaro-website/images/blog/30921188158_953bca1c9f_k +tags: + - iot-embedded + - linux-kernel +author: linaro +date: 2011-04-05T20:53:07.000Z +link: /blog/community-blog/linaro-at-elc/ +related: [] + +--- + +![Embedded Linux Conference 2011 lightbox\_disabled=True](/linaro-website/images/blog/header_elc_2011) + +Next week, the [Embedded Linux Conference](https://events.linuxfoundation.org/) will start in San Francisco, and this year several Linaro engineers will share their work and experience. There will be six talks related to Linaro! + +![David Rusling class=small-inline](/linaro-website/images/blog/DAR) + +First, David Rusling, Linaro's CTO, will present [Linaro, a Year of Change](https://events.linuxfoundation.org/events/elc-openiot-north-america-2018/), covering the reasons for creating Linaro, its achievements and ongoing work. You will also learn about how Linaro is organized. + +![Arnd Bergmann class=small-inline right](/linaro-website/images/blog/bergmannx) + +Then, Arnd Bergmann, Linaro kernel engineer, will cover [Optimizations For Cheap Flash Media](https://events.linuxfoundation.org/events/elc-openiot-north-america-2018/), explaining how to work around the shortcomings of cheap flash storage (MMC/SD, eMMC, USB flash drives), and to get the best performance out of these storage devices. Arnd will also be present at the [Technical Showcase](https://events.linuxfoundation.org/events/elc-openiot-north-america-2018/), demonstrating his `flashbench` utility.
Participants will be able to come with their own flash media and reverse engineer them during the showcase. To whet your appetite on this topic, you should read the [very interesting article](http://lwn.net/Articles/428584/) that Arnd wrote on LWN.net. + +As the keynote speaker for the second day, Arnd will also present [Becoming Part of the Linux Kernel Community](https://events.linuxfoundation.org/events/elc-openiot-north-america-2018/), sharing lessons learned and guidelines for companies which are now trying to merge code upstream, driven by the requirements of application platforms like Android and MeeGo. + +![Amit Kucheria class=small-inline ](/linaro-website/images/blog/amit-kucheria) + +Next comes Amit Kucheria, power management tech lead and kernel engineer at Linaro, with a [Powerdebugging Inside Linaro](https://events.linuxfoundation.org/events/elc-openiot-north-america-2018/) talk. Amit will cover Linaro's efforts to make it easier for Arm developers to create battery-friendly software, in particular `Powerdebug`, a utility to identify what drains the battery of a product. Amit will also cover the common kernel frameworks that are needed to progress in this area, some already implemented, and some yet to be created. Amit has countless things to say about power management, and if you are interested in this topic, you should also read [his blog post about prolonging battery life](http://idlethread.blogspot.com/2010/12/prolonging-battery-life-on-your.html). + +![Paul Larson class=small-inline right ](/linaro-website/images/blog/paul-larson) + +Right after Amit, Paul Larson, tech lead of Linaro's validation team, will share his experience with [Linaro Automated Validation on Arm](https://events.linuxfoundation.org/events/elc-openiot-north-america-2018/). Linaro developed a framework called `Lava` to implement automated validation of Arm devices. + +![Jesse Barker class=small-inline](/linaro-website/images/blog/JesseBarker) + +The sixth Linaro talk will be presented by Jesse Barker, tech lead for the Linaro Graphics Working Group: [Linux Graphics Meets the Arm Ecosystem](https://events.linuxfoundation.org/events/elc-openiot-north-america-2018/). Jesse will cover the evolution of the Linux graphics stack to meet the needs of mobile and embedded devices. You can also expect some details about OpenGL and OpenGL ES, and will be able to ask all your graphics-related questions to Jesse. + +You can also have a look at the [Linaro Wiki](https://www.linaro.org/about/) for more details about all the speakers mentioned above. Other Linaro engineers and community members should also be there. Don't hesitate to take advantage of this conference to get in touch with us! + +If you can't make it to this conference this time, my colleagues at [Free Electrons](https://bootlin.com/community/) will film these talks and release the videos as quickly as possible. We now have an efficient and hopefully stable video processing flow. + +Have a great experience at ELC!
diff --git a/src/content/blogs/linaro-board-minutes-september-2010.mdx b/src/content/blogs/linaro-board-minutes-september-2010.mdx new file mode 100644 index 0000000..149a988 --- /dev/null +++ b/src/content/blogs/linaro-board-minutes-september-2010.mdx @@ -0,0 +1,14 @@ +--- +author: linaro +date: 2010-09-29T10:54:18.000Z +description: CAMBRIDGE, UK - 29 SEP 2010 +link: /news/linaro-board-minutes-september-2010/ +title: Linaro Board minutes September 2010 +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 29 SEP 2010 + +See the latest meeting minutes of the Linaro board meeting. [Download the PDF](/assets/downloads/Linaro-Board-Minutes-2010-09-OPEN-Publish-Final.pdf) diff --git a/src/content/blogs/linaro-brings-open-source-test-platform-open-compute-project.mdx b/src/content/blogs/linaro-brings-open-source-test-platform-open-compute-project.mdx new file mode 100644 index 0000000..063b3ea --- /dev/null +++ b/src/content/blogs/linaro-brings-open-source-test-platform-open-compute-project.mdx @@ -0,0 +1,32 @@ +--- +author: linaro +date: 2013-10-28T12:30:08.000Z +description: Linaro has joined the Open Compute Project (OCP) as an official + member and contributes the LAVA LMP (Linaro Multi-purpose probe) to the + foundation +excerpt: Linaro has joined the Open Compute Project (OCP) as an official member + and contributes the LAVA LMP (Linaro Multi-purpose probe) to the foundation +link: /news/linaro-brings-open-source-test-platform-open-compute-project/ +title: Linaro Brings Open Source Test Platform to Open Compute Project +tags: [] +related: [] + +--- + +SANTA CLARA, US - 28 OCT 2013 + +## Linaro has joined the Open Compute Project (OCP) as an official member and contributes the LAVA LMP (Linaro Multi-purpose probe) to the foundation + +Linaro, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced that it has joined the Open Compute Project (OCP), an initiative launched by Facebook in 2011 to increase technology efficiencies and reduce the environmental impact of data centers. + +OCP applies open-source software principles to the hardware industry to drive the development of the most efficient computing infrastructures at the lowest possible cost. Working with partners in both the hardware and software industries, it strives to achieve further innovation and efficiencies in scale computing technology. In parallel, Linaro – in particular within the Linaro Enterprise Group (LEG) – has become the place where a similar cross-section of companies collaborate on the future of Linux software on Arm-based servers. Linaro’s intention in joining OCP is to bring together the hardware and software discussion and help accelerate the release of more efficient server solutions. It will also contribute the specifications of the LAVA Multi-purpose Probe (LMP) to be made available through OCP. LMP boards extend the testing options for hardware incorporated into the Linaro Automated Validation Architecture (LAVA) test framework, which is available as an open source project. + +“We’re excited to welcome Linaro into the Open Compute Project,” said Cole Crawford, executive director of the Open Compute Foundation.
“The organization’s commitment to developing core open source software in a collaborative and transparent environment matches our own mission of openly sharing ideas and specifications to accelerate innovation in pursuit of the most efficient data center infrastructure.” + +“We’ve been working closely with OCP since the founding of the Linaro Enterprise Group and the synergies between hardware and software in this space are very exciting,” said George Grey, Linaro CEO. “We’re very happy to be formally joining OCP and bringing the benefits of LAVA and LMP to a broader audience.” + +About Linaro + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 140 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [www.linaro.org](/). diff --git a/src/content/blogs/linaro-ceo-george-grey-speak-elc-2013.mdx b/src/content/blogs/linaro-ceo-george-grey-speak-elc-2013.mdx new file mode 100644 index 0000000..66cb90b --- /dev/null +++ b/src/content/blogs/linaro-ceo-george-grey-speak-elc-2013.mdx @@ -0,0 +1,54 @@ +--- +author: linaro +date: 2013-02-13T12:23:28.000Z +description: CAMBRIDGE, UK - 13 FEB 2013 +link: /news/linaro-ceo-george-grey-speak-elc-2013/ +title: Linaro CEO George Grey to speak at ELC 2013 +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 13 FEB 2013 + +Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, announced that it will be participating in the [Embedded Linux Conference 2013](https://events.linuxfoundation.org/) being held February 20-22, 2013 at the Parc 55 Hotel in San Francisco, CA. The Embedded Linux Conference (ELC) is the premier vendor-neutral technical conference for companies and developers using Linux in embedded products. This conference, now in its 9th year, has the largest collection of sessions dedicated exclusively to embedded Linux and embedded Linux developers. + +This year, Linaro CEO George Grey and many key Linaro engineers will be speaking in several sessions.
The times and topics of these talks are: + +* **George Grey, Chief Executive Officer** + + * Wednesday, February 20 + + * 9:00am: Keynote - Working Together to Accelerate Linux Development - Jim Zemlin, Executive Director, The Linux Foundation and George Grey, CEO, Linaro + +* **Jesse Barker, Graphics Working Group** + + * Thursday, February 21 + * 4:00pm: Common Display Framework (Part I) + * 5:00pm: Common Display Framework (Part II) + +* **Mark Orvek, Director, Kernel Working Groups** + + * Thursday, February 21 + + * 1:45pm: Application Diversity Demands Accelerated Linux Innovation + +* **Mans Rullgard, Toolchain Engineer** + + * Thursday, February 21 + + * 4:00pm: Designing for Optimization + +* **Mathieu Poirier, Kernel Engineer** + + * Friday, February 22 + * 9:00am: In Kernel Switcher: A Solution to Support Arm's New big.LITTLE Implementation + +Stop by any of our speaking sessions to learn more about Linaro's work in consolidating and optimizing open source software for the Arm architecture. + +About Linaro: +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 120 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. To ensure commercial quality software, Linaro's work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro's engineering work is open to all online. + +**Join us at Linaro Connect** + +Linaro Connect is held every three to four months to bring the Linux on Arm community together to work on the latest system-on-chip (SoC) developments, plan new engineering efforts and hold engineering hacking sessions. These events give the Linux community an opportunity to be a part of the Linaro team and help to define the Arm tools, Linux kernels and builds of key Linux distributions including Android and Ubuntu on member SoCs. The next Linaro Connect will be March 4-8, 2013 in Hong Kong.
diff --git a/src/content/blogs/linaro-completes-first-year-demonstrations-linaro-evaluation-builds-android-ubuntu-introduction-new-partner-program.mdx b/src/content/blogs/linaro-completes-first-year-demonstrations-linaro-evaluation-builds-android-ubuntu-introduction-new-partner-program.mdx new file mode 100644 index 0000000..ce74010 --- /dev/null +++ b/src/content/blogs/linaro-completes-first-year-demonstrations-linaro-evaluation-builds-android-ubuntu-introduction-new-partner-program.mdx @@ -0,0 +1,63 @@ +--- +author: linaro +date: 2011-05-27T11:11:29.000Z +description: CAMBRIDGE, UK - 27 MAY 2011 +link: /news/linaro-completes-first-year-demonstrations-linaro-evaluation-builds-android-ubuntu-introduction-new-partner-program/ +title: Linaro completes first year with demonstrations of Linaro Evaluation + Builds for Android and Ubuntu and introduction of new partner program +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 27 MAY 2011 + +## Milestone highlights Linaro delivering on its promise to make it easier and quicker to develop open source products + +Linaro, a not-for-profit open source software engineering company, is marking its first anniversary with demonstrations of the latest open source software running on its members' hardware at Linaro Developer Summit (LDS) and Computex. The company also announced Linaro Partner Program, an initiative designed to help companies and organizations get involved with Linaro's engineering effort. + +Linaro was launched at Computex in 2010 by Arm, IBM, Freescale, Samsung, ST-Ericsson and TI with the mission to create an open source engineering organization that provides aligned engineering and investment into open source projects to reduce fragmentation, increase optimization and make it easier for OEMs and ODMs to develop Linux-based products. + +### Key milestones: + +* In the first year, Linaro has built a team of more than 100 talented open source developers that have worked on the open source Linux platform on member chips and delivered code upstream that benefits all Linux-based distributions. +* The latest tools, code and optimizations have been used to create Linaro Evaluation Builds for Android and Ubuntu, making it easier for device manufacturers to use Linaro's engineering. +* To help device makers get to market more quickly, Linaro has focused on delivering product quality software and has created the Linaro Automated Validation Architecture (LAVA) platform which runs on a test farm of members' low-cost boards. + +The demonstrations at LDS showcased the Android and Ubuntu Linaro Evaluation Builds for the first time and will be extended further at Computex to include new hardware that will be launched at the show. To follow Linaro's engineering progress, an up-to-date status report can be found at [https://releases.linaro.org](https://releases.linaro.org/) + +"The demonstration of Android and Ubuntu Linaro Evaluation Builds and the development of the validation test farm are tangible proof points of the progress Linaro has made on delivering on its mission," said George Grey, Chief Executive Officer of Linaro. "We have delivered consolidation and performance features upstream, worked with our members on support for their latest SoCs, created optimized implementations of popular distributions and focused on the development and testing of product quality software for the benefit of our members." 
+ +### Linaro Partner Program + +Linaro also announced today the Linaro Partner Program, which is designed to enable companies and organizations to get involved with Linaro's engineering team. The Linaro Partner Program will give ISVs, software service providers and device manufacturers the ability to embed engineers in Linaro and work together on projects of common interest. This alignment with Linaro will be beneficial to both sides, extending the engineering effort available to Linaro and enabling the sponsoring companies to work with a larger team and build profitable businesses around Linaro's roadmap. + +"A year ago, Linaro was formed to enhance open source innovation for the next wave of always-connected, always-on computing," said Chief Operations Officer Stephen Doel. "Today, as we turn 1 year old, we are pleased to share our major achievements as we announce Linaro's Partner Program that will continue to accelerate the depth of engagement from electronics companies and encourage continuous involvement from the wider community." + +At launch, the Linaro Partner Program includes Thundersoft, Canonical, Mentor Graphics, Genesi and Collabora. Companies interested in aligning with Linaro and joining the Linaro Partner Program should contact the management team. + +### Linaro Founding Member Quotes + +"Over the last year, Linaro has succeeded by helping make it easier and quicker to bring open source based devices to market," said James McNiven, vice president of Software Alliances of Arm. "Linaro has demonstrated the efficiencies that can be achieved by adopting a coordinated approach to open source and working closely with the Arm Partner community." + +"Linaro in conjunction with its other partners of which IBM is one, has been building out a comprehensive ecosystem to enable developers to deliver next-generation software for the mobile segment," said Mark Ireland, vice president of IBM Semiconductor Products & Services and Linaro club director. "Advanced silicon process technology for low power system-on-a-chip designs and software are being used to create solutions optimized for the Arm architecture through the IBM Process Technology alliance." + +"At TI, we strive to create innovative technologies that revolutionize the way people interact with each other and the outside world," said Ari Rauch, senior director of Software and System Engineering, OMAP Products, Wireless Business Unit at TI. "With this pursuit in mind, we are extremely proud to be a part of the Linaro team, and we congratulate the organization on this one-year milestone. We look forward to many more years to come, as we work together to unleash new successes in the Embedded Linux community via proven Arm-based architectures, including TI's OMAP™ processors." + +"Samsung has been putting much effort on delivering optimized software for Arm Linux-based developers as a member of Linaro since its launching in 2010," said Youngki Chung, vice president of Software Solution Development Team, System LSI Division at Samsung Electronics. "We are pleased with Linaro's achievements of consolidated software and environments and believe that our customers and the open source community will experience the benefits of acceleration in designing their products through the innovative Exynos platform." + +"As a member, we are thrilled to see the great achievements of Linaro after just one year of operations," said Teppo Hemia, vice president and head of the Application Engine and Platform Business Unit of ST-Ericsson.
"By working in a true open source way, Linaro has made impressive enhancements to the Arm Linux code base which will significantly reduce time to market and investment for OEMs and ODMs. ST-Ericsson is a strong supporter of this approach and has recently, together with partners, launched the Igloo Community and Snowball board as an open source development platform which will fully benefit from the work of Linaro." + +"Linaro is delivering on the goal of quickening time to market for our i.MX processors," said Glen Burchers, consumer and industrial segment marketing director for Freescale Semiconductor. "Our customers now have confidence that the Linux mainline kernel is up to date with the latest Freescale code submissions." + +### Linaro Partner Quotes + +"ThunderSoft is pleased to see many common system requirements being addressed in Linaro project. By joining the Linaro Partner Program, ThunderSoft is better armed to deliver the Arm-based software solution to our customers more solidly and more efficiently," said Duan ZhiQiang, chief technology officer of Thundersoft. + +"Linaro is essential to the continued expansion of open source software on the Arm architecture. As a member of the Linaro Partner Program, we ensure it's quick and easy to develop and use the best open source technologies on Arm. We're pleased to be working with Linaro ensuring great products can be built using Linaro technologies in Ubuntu," said Steve George, vice president of Business Development at Canonical. + +"Linaro is an innovative model for advancing open source technology on the Arm architecture," said Glenn Perry, general manager of the Mentor Graphics Embedded Software Division. "CodeSourcery, which Mentor Graphics acquired in November 2010, was chosen by Linaro at its inception to assist in developing the GNU toolchain. We look forward to expanding our relationship with Linaro as we pursue our goal of developing products that simplify and accelerate embedded Linux development." + +"In its first year, Linaro has proven to be a dynamic and progressive organization. We are confident that Linaro will contribute significantly to the Arm ecosystem in the years to come and we look forward to being involved," said Raquel Velasco, chairman of Genesi. + +"Linaro's focus on delivering improvements upstream perfectly complements Collabora's emphasis on the commercial benefits of effective open source participation," says Robert McQueen, chief technology officer and co-founder of Collabora. "We're really pleased to align our R\&D investment with Linaro's goals and help them build on the progress they've made in the past year." 
diff --git a/src/content/blogs/linaro-connect-europe-2013-lce13-host-first-demonstration-kvm-appliedmicros-arm-64c2adbit-hardware.mdx b/src/content/blogs/linaro-connect-europe-2013-lce13-host-first-demonstration-kvm-appliedmicros-arm-64c2adbit-hardware.mdx new file mode 100644 index 0000000..e1536ed --- /dev/null +++ b/src/content/blogs/linaro-connect-europe-2013-lce13-host-first-demonstration-kvm-appliedmicros-arm-64c2adbit-hardware.mdx @@ -0,0 +1,52 @@ +--- +author: linaro +date: 2013-07-11T12:28:22.000Z +excerpt: Following the public showing of its X-Gene™ Armv8 64-bit Server on a + Chip™ solution at the Red Hat Summit, AppliedMicro is now ready to demonstrate + KVM running on the X-Gene platform +link: /news/linaro-connect-europe-2013-lce13-host-first-demonstration-kvm-appliedmicros-arm-64%c2%adbit-hardware/ +title: Linaro Connect Europe 2013 (LCE13) to Host First Demonstration of KVM on + AppliedMicro’s Arm® 64-bit Hardware +tags: [] +related: [] +description: The Demo Friday event at Linaro Connect Europe (LCE13) on 12 July 2013 will play host to the world’s first demonstration of KVM (Kernel-based Virtual Machine) running on an Armv8 64-bit silicon platform from Applied Micro Circuits Corporation. + +--- + +DUBLIN, IRELAND - 11 JUL 2013 + +## In summary + +Following the public showing of its X-Gene™ Armv8 64-bit Server on a Chip™ solution at the Red Hat Summit, AppliedMicro is now ready to demonstrate KVM running on the X-Gene platform + +The Demo Friday event at Linaro Connect Europe (LCE13) on 12 July 2013 will play host to the world’s first demonstration of KVM (Kernel-based Virtual Machine) running on an Armv8 64-bit silicon platform from Applied Micro Circuits Corporation (NASDAQ: AMCC). + +“We are very proud of the Arm, Linaro and AppliedMicro software engineers who teamed up to develop and demonstrate KVM on AppliedMicro’s X-Gene™ Cloud Server™ system, bringing to reality enterprise-class virtualization on Arm 64-bit servers,” said Dr. Paramesh Gopi, President and CEO of AppliedMicro. “Enterprise and private cloud customers can now access a complete set of tools to enable low-TCO Platform-as-a-Service applications and to accelerate the seamless transition to Armv8 64-bit hardware.” + +The demonstration will show KVM-based Armv8 64-bit virtualization on the AppliedMicro X-C1 hardware platform featuring the company’s X-Gene Server on a Chip™ silicon solution. This solution consists of eight Armv8 64-bit processors running four SMP Linux guest VMs (two Armv7 32-bit and two Armv8 64-bit guests) with web servers running concurrently on each VM using VirtIO-based network virtualization. The web servers will be delivering HTML web pages with varying graphical content. + +Arm handled porting KVM for this demonstration from 32-bit to 64-bit, while AppliedMicro engineers brought KVM up on the hardware and Linaro ensured a robust solution. + +“It’s very exciting to be working with AppliedMicro on the world’s first Armv8 64-bit hardware platform,” said George Grey, Linaro CEO. “We look forward to showcasing KVM virtualization running on this platform tomorrow during the Linaro Connect Demo Friday.” + +This Linaro Connect in Dublin, Ireland is playing host to over 300 engineers working on open source software for the latest Arm processor technology. The attendees from Linaro’s 25 member companies have been joined by other key players in the Arm ecosystem to define the future of Linux on Arm.
The event concludes on Friday, 12 July with Demo Friday - an opportunity for working groups, individual contributors and member companies like AppliedMicro to showcase their most recent developments. + +About Linaro + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 140 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [www.linaro.org](/). + +About AppliedMicro + +Applied Micro Circuits Corporation is a global leader in computing and connectivity solutions for next-generation cloud infrastructure and data centers. AppliedMicro delivers silicon solutions that dramatically lower total cost of ownership. Corporate headquarters are located in Sunnyvale, California. [www.apm.com](http://www.apm.com/). + +*Applied Micro Circuits Corporation, AppliedMicro, X-Gene, Server on a Chip, and Cloud Server are trademarks or registered trademarks of Applied Micro Circuits Corporation. All other product or service names are the property of their respective owners.* + +CONTACT: + +Media Contact: Michael Major, Applied Micro Circuits Corporation + ++1-408-542-8831 + +mmajor@apm.com diff --git a/src/content/blogs/linaro-enables-wider-portability-high-speed-networking-applications-release-opendataplane-v1-0.mdx b/src/content/blogs/linaro-enables-wider-portability-high-speed-networking-applications-release-opendataplane-v1-0.mdx new file mode 100644 index 0000000..0711e64 --- /dev/null +++ b/src/content/blogs/linaro-enables-wider-portability-high-speed-networking-applications-release-opendataplane-v1-0.mdx @@ -0,0 +1,85 @@ +--- +title: Linaro Enables Wider Portability of High Speed Networking Applications + with Release of OpenDataPlane v1.0 +description: OpenDataPlane (ODP) enables proven software portability between + network compute platforms, regardless of the underlying instruction set + architecture (ISA), as well as transparent support for any hardware or + software acceleration capability. +image: linaro-website/images/blog/Banner_Virtualization +tags: + - linaro-connect + - linux-kernel + - open-source +author: linaro +date: 2015-03-03T14:00:23.000Z +link: /news/linaro-enables-wider-portability-high-speed-networking-applications-release-opendataplane-v1-0/ +related: [] + +--- + +## OpenDataPlane (ODP) enables proven software portability between network compute platforms, regardless of the underlying instruction set architecture (ISA), as well as transparent support for any hardware or software acceleration capability. + +Cambridge, UK; 3 March 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for its member companies, today announced the release of [OpenDataPlane](http://www.opendataplane.org/) version 1.0. + +ODP is an open source, open contribution framework for portable, high performance data plane applications.
Applications written to the ODP APIs can run on any ODP-enabled network compute platform, including System-on-Chip (SoC), general purpose processor (with or without additional network hardware acceleration) or Application Specific IC (ASIC) solutions. ODP-enabled platforms enjoy both portability and transparent access to any available hardware or software acceleration capabilities. In turn, platforms that support ODP can run any ODP application with a simple recompile. ODP implementations have been developed to support a variety of networking SoCs and processor architectures, including Arm, MIPS, Power Architecture, and x86. + +The ODP open source framework has been developed by the [Linaro Networking Group (LNG)](https://wiki-archive.linaro.org/LNG) since [October 2013](/news/linaro-launches-opendataplane-odp-project-deliver-open-source-cross-platform-interoperability-networking-platforms/) as a joint effort of many stakeholders, spanning both application providers and silicon vendors. These stakeholders have been working towards the common goal of providing a truly open source and cross-platform framework for portable application access to advanced hardware acceleration and offload capabilities provided by industry-leading network SoCs. The release of ODP v1.0 represents the completion of the first phase of ODP development, and provides a stable set of APIs suitable for widespread application development and evaluation. In 2015 ODP will continue to be developed to provide an enhanced set of APIs and implementations across a range of network compute platforms including multiple CPU architectures. + +LNG members anticipate that ODP will be a key building block in the realization of SDN (Software Defined Networking) and NFV (Network Function Virtualization) solutions. The framework provides for the first time a portable cross-architecture design that can be used by application and platform providers to maximize the performance of their unique product capabilities. + +Included in this release is the definition of the ODP v1.0 APIs, an open source reference implementation of these APIs that can run on any Linux platform, as well as a validation suite that can be used by ODP implementations to verify that they conform to the ODP v1.0 API set. Additional implementations of ODP v1.0 targeting specific SoC platforms will be announced by their respective owners in the coming weeks. + +**For more information about ODP, to download a copy of version 1.0, or to become involved in the project, please visit [OpenDataPlane.org.](http://www.opendataplane.org/)** + +***Supporting Quotes*** + +**Arm** + +“OpenDataPlane has already demonstrated cross platform interoperability and has been identified as a critical upstream project for the network infrastructure market,” said Ian Drew, Chief Marketing Officer and EVP Business Development, Arm. “In reaching the ODP 1.0 release milestone, Linaro delivers a shining example of what can be accomplished through open source collaboration.” + +**Cavium** + +“All the members of the Linaro Networking Group (LNG) should be proud that with the release of OpenDataPlane (ODP) v1.0 the team has fulfilled the vision of creating a standard that can provide for cross platform compatibility at the API layer that is completely portable across a range of vendors’ processors including Cavium’s OCTEON and ThunderX product lines.” said Imran Badr, VP of Software Engineering, Cavium. 
“Cavium’s extensive experience in fast-path development has allowed us to utilize ODP to extract the maximum performance from our accelerated hardware to the point that it is on par with our bare metal implementations. Indeed at Mobile World Congress 2015 we are showing 100Gbps of IPSEC throughput utilizing ODP APIs on our OCTEON III CN78xx 48 core processor as well as a Virtualized Mobile Core (vEPC) demo also utilizing ODP APIs on our ThunderX Armv8-A based cores in a completely virtualized environment. The growing third party support for the ODP ecosystem coupled with the work of the Open Platform for NFV (OPNFV) project will help accelerate the evolution of Network Function Virtualization (NFV) initiatives.” + +**ENEA** + +"As a founding member of the Linaro Networking Group and contributor to the OpenDataPlane initiative, Enea is committed to working together with leading hardware vendors and customers on SDN- and NFV-enabling technologies", said Daniel Forsgren, SVP Product Management at Enea. "We are proud to be a key partner in the Arm ecosystem, developing and hardening optimal software solutions for tomorrow’s connected society." + +**Freescale** + +“Freescale was a major contributor to the development of ODP and is fully supportive of ODP’s charter of providing a truly open, truly multi-platform API for developing highly accelerated and virtualized networks,” said Raja Tabet, Vice President of Software and Systems for Freescale’s Digital Networking Group. “Freescale is also announcing support for ODP on our industry leading line of QorIQ multicore communications processors based on both Arm and Power Architectures”. + +**MontaVista** + +“With the release of ODP v1.0, a truly open, cross platform framework supporting multiple architectures becomes available to Network equipment providers and integrators to build and maintain advanced data plane applications easily. MontaVista has already engaged with key OEMs to integrate ODP applications with our Carrier Grade Edition Linux platforms and moving forward we will be supporting the latest advances in powerful Arm V8 SoCs such as Cavium’s ThunderX.” said Sanjay Raina, General Manager of MontaVista Software. + +**Nokia Networks** + +“Mobile broadband operators face a spread of challenges and exciting new opportunities to build profitability. Traffic growth continues almost unbounded, demanding new network capacity, yet traffic patterns are increasingly unpredictable. At the same time, providing a great network experience for subscribers is extremely important and will become critical as a principal driver of revenue. To meet these needs, operators require extreme flexibility in their network and operations to satisfy their customers by responding with rapid time-to-market performance. In parallel, they need to see a drastic reduction in their total ‘production’ cost per bit. OpenDataPlane is a continuation in our effort to provide better application portability, faster time-to-market, higher performance and lower power across all networking applications,” said Jarmo Hillo, Head of Processor Technology at Nokia Networks. + +*** + +**About Linaro** +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 150 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. 
Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [www.linaro.org](/). + +**About Arm** Arm is at the heart of the world's most advanced digital products. Our technology enables the creation of new markets and transformation of industries and society. We design scalable, energy-efficient processors and related technologies to deliver the intelligence in applications ranging from sensors to servers, including smartphones, tablets and the internet of things. + +Our innovative technology is licensed by Arm Partners who have shipped more than 60 billion system-on-chip (SoC) devices containing Arm intellectual property since the company began in 1990. Together with our Connected Community, we are breaking down barriers to innovation for developers, designers and engineers, ensuring a fast, reliable route to market for leading electronics companies. Learn more and join the conversation at [http://community.arm.com](http://community.arm.com). + +**About Cavium** Cavium is a leading provider of highly integrated semiconductor products that enable intelligent processing in enterprise, data center, cloud and wired and wireless service provider applications. Cavium offers a broad portfolio of integrated, software-compatible processors ranging in performance from 100 Mbps to 100 Gbps that enable secure, intelligent functionality in enterprise, data-center, broadband/consumer and access and service provider equipment. Cavium’s processors are supported by ecosystem partners that provide operating systems, tool support, reference designs and other services. Cavium’s principal office is in San Jose, CA with design team locations in California, Massachusetts, India and China. For more information, please visit: [http://www.cavium.com](http://www.cavium.com). + +**About ENEA** Enea is a global supplier of Linux and real-time operating system solutions, including middleware, tools, databases, and world class services, with a vision to enable communication everywhere. As a trusted and respected player in the embedded software ecosystem, Enea has for more than four decades delivered value and helped customers develop and maintain ground-breaking products. Every day, more than three billion people around the globe rely on Enea’s technologies in a wide range of applications in multiple verticals – from Telecom and Automotive, to Medical and Avionics. Enea has offices in Europe, North America and Asia, and is listed on NASDAQ OMX Nordic Exchange Stockholm AB. For more information please visit [www.enea.com](https://www.enea.com/) or contact us at info@enea.com. + +**About Freescale Semiconductor** +Freescale Semiconductor (NYSE:FSL) enables secure, embedded processing solutions for the Internet of Tomorrow. Freescale’s solutions drive a more innovative and connected world, simplifying our lives and making us safer.
While serving the world’s largest companies, Freescale is also committed to supporting science, technology, engineering and math (STEM) education, enabling the next generation of innovators. [www.freescale.com](https://www.nxp.com/) + +**About MontaVista Software** MontaVista Software, LLC, a wholly owned subsidiary of Cavium, Inc. (NASDAQ: CAVM), is a leading provider of Carrier Grade Linux and Cloud Platform solutions. For over 15 years, MontaVista has been helping Linux developers get the most out of open source by adding commercial quality, integration, hardware enablement, expert support, and the resources of the MontaVista development community. Because MontaVista customers enjoy faster time to market, more competitive device functionality, and lower total cost, more devices have been deployed with MontaVista than with any other Linux. To learn more, please visit http://www.mvista.com + +**About Nokia Networks** +Nokia invests in technologies important in a world where billions of devices are connected. We are focused on three businesses: network infrastructure software, hardware and services, which we offer through Nokia Networks; location intelligence, which we provide through HERE; and advanced technology development and licensing, which we pursue through Nokia Technologies. Each of these businesses is a leader in its respective field. + +Nokia Networks is the world’s specialist in mobile broadband. From the first ever call on GSM, to the first call on LTE, we operate at the forefront of each generation of mobile technology. Our global experts invent the new capabilities our customers need in their networks. We provide the world’s most efficient mobile networks, the intelligence to maximize the value of those networks, and the services to make it all work seamlessly. [http://networks.nokia.com/](http://networks.nokia.com/)   [http://company.nokia.com](http://company.nokia.com) diff --git a/src/content/blogs/linaro-forms-security-working-group.mdx b/src/content/blogs/linaro-forms-security-working-group.mdx new file mode 100644 index 0000000..da6569b --- /dev/null +++ b/src/content/blogs/linaro-forms-security-working-group.mdx @@ -0,0 +1,46 @@ +--- +author: linaro +date: 2014-02-20T12:00:29.000Z +description: Linaro has started a Working Group dedicated to the delivery of + open source reference implementations of secure software on Arm® platforms +link: /news/linaro-forms-security-working-group/ +title: Linaro Forms Security Working Group +tags: [] +related: [] + +--- + +## Linaro has started a Working Group dedicated to the delivery of open source reference implementations of secure software on Arm® platforms + +CAMBRIDGE, UK - 20 FEB 2014 + +Linaro Ltd, the not-for-profit engineering organization developing open source software for the Arm architecture, has formed the Linaro Security Working Group (SWG) to help ensure an optimised and efficient software ecosystem exists to support Arm open source Linux distributions on security related topics, and to accelerate the delivery of high quality secure products across the Arm open source ecosystem. + +Over the last ten years, demand for secure devices in the mobile, home, and other spaces has been met by various advances in technology including Arm’s TrustZone® technology, a key feature of the 32-bit Armv7 and the latest 32/64-bit Armv8 Cortex®-A processors. 
These processors have provided SoC hardware security features, but to date there has been a limited availability of open source reference software that enables application writers to benefit from these features. As demand for secure software increases, the Linaro Security Working Group aims to reduce potential fragmentation. + +In order to enable applications such as securely booting a server or decoding encrypted media, there needs to be a Trusted Execution Environment (TEE). Linaro will be creating reference designs showing how normal and trusted application code and libraries can be integrated within a particular platform such as Android. Initial Linaro activities include the development of an open source reference implementation of the W3C Embedded Media Extension (EME) using platform security features for secure media playback on mobile and digital home devices; and an open source reference implementation of secure boot for the 64-bit Arm Cortex-A series processors to complement the Arm Trusted Firmware open source project, targeted at server applications. In addition there will be work on security features in the Linux kernel. + +To ensure the broadest commercial choice and applicability for Linaro’s members, the SWG will ensure that the reference applications operate with a range of TEEs, such as open source implementations from Linaro member STMicroelectronics and NVIDIA Corporation, and commercial offerings from Trustonic. + +By delivering tested reference open source software Linaro will enable SoC vendors, OEMs and application developers to more easily understand how to design and build secure applications across a wide range of Arm products and segments. These include the Internet of Things, mobile devices, the digital home and advanced multi-node hyperscale servers. + +“As Linaro’s mandate has expanded beyond mobile to include servers, networking, and the digital home, and as we look forward to the Internet of Things, security is an essential component.” said David Rusling, Linaro CTO, “As security standards emerge it is important that Linaro’s members work together to create and enable interoperable open source solutions that enable the Arm architecture in these markets.” + +“The importance of security in computing, especially personal computing, has been rapidly increasing.” said George Grey, Linaro CEO, “Vendors require a common security foundation on which they can build their own applications. With the Security Working Group, Linaro is in a unique position to provide key software reference solutions to important industry problems that work with the security foundations on Arm platforms. We look forward to helping do this with our members, efficiently and without fragmentation.” + +Supporting Quotes + +“The security working group is an essential initiative by Linaro as it should help the industry accelerate the release of innovative, security-enhanced, Arm-based products to market” said James McNiven, deputy general manager, systems and software, Arm. “Linaro has a proven track record of fuelling code collaboration across the Arm ecosystem. This announcement will build on Arm Trusted Firmware to provide an ideal reference and foundation for low level software on the latest Arm-based platforms.” + +“STMicroelectronics is working closely with the Linaro Security Working Group to make its Trusted Execution Environment freely available for Arm TrustZone,” said Christophe Lorieau,  Director System, Software & Customer Support, Unified Platform Division, STMicroelectronics. 
“By working with Linaro we can ensure that the open-source implementation is ready to meet the needs of the wider Arm community and will encourage the use of TEE as part of the implementation for their security work on the Arm architecture. Such work will undoubtedly expand the range of secured applications for consumers within the home.” + +“Security in personal computing is a vital concern for both the mobile industry and consumers,” said Hadi Nahari, Chief Security Architect, Tegra Software at NVIDIA. “We’re pleased to work with Linaro and the Security Working Group to bring a common open-source solution to market.” + +“Trustonic is pleased to support Linaro in creating open reference solutions to some of the most common application security problems in modern mobile computing,” said Jon Geater, CTO Trustonic. “By ensuring these reference implementations interoperate with Trustonic offerings we hope to benefit the whole Arm ecosystem.” +About Linaro + +Linaro is the place where engineers from the world’s leading technology companies define the future of open source on Arm. The company is a not-for-profit engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top three company contributors to recent Linux kernels (LWN lists Linaro as the number 2 company contributor to kernels 3.12 and 3.13 and #3 to kernels 3.10 and 3.11: [http://lwn.net/Articles/579081/](http://lwn.net/Articles/579081/), [http://lwn.net/Articles/570483/](http://lwn.net/Articles/570483/), [http://lwn.net/Articles/563977/](http://lwn.net/Articles/563977/), [http://lwn.net/Articles/555968/](http://lwn.net/Articles/555968/)). + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online.
+ +For more information on the company, access to software and tools, and information on the community and open engineering, visit [www.linaro.org](/) diff --git a/src/content/blogs/linaro-gains-momentum-demonstrates-progress-accelerating-open-source-development.mdx b/src/content/blogs/linaro-gains-momentum-demonstrates-progress-accelerating-open-source-development.mdx new file mode 100644 index 0000000..11f6087 --- /dev/null +++ b/src/content/blogs/linaro-gains-momentum-demonstrates-progress-accelerating-open-source-development.mdx @@ -0,0 +1,76 @@ +--- +author: linaro +date: 2010-11-10T12:06:52.000Z +description: CAMBRIDGE, UK - 10 NOV 2010 +link: /news/linaro-gains-momentum-demonstrates-progress-accelerating-open-source-development/ +title: Linaro gains momentum and demonstrates progress accelerating open source + development +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 10 NOV 2010 + +## Linaro completes 10.11 release on time and showcases its engineering on multiple Arm Cortex-A9 chips running multiple software distributions + +Linaro (announced at Computex by Arm, Freescale, IBM, Samsung, ST-Ericsson and Texas Instruments) completes its first engineering cycle (10.11) on the 10th November with a demonstration of tools and software improved by Linaro being used on the latest Cortex-A9 based chips. + +* Linaro is building on this momentum and expanding the number of Working Groups to five, covering: Graphics, Multimedia, Power Management, Tools and Kernel. +* Linaro's engineering team has grown from 20 to 70 open source developers with new talent added every month. +* To strengthen Linaro's governance, TI joins Arm on the board along with IBM in representation of all Club Members. +* Linaro's members will be demonstrating multiple software distributions built with Linaro software and tools on multiple dual core Cortex-A9 System on Chip (SoC) products at Arm Techcon (Santa Clara, CA) on the 10th November. +* Linaro has delivered on its first goal of investing directly in open source projects, consolidating, restructuring and tuning code to run optimally on Arm based System on Chip (SoC) solutions from multiple partners in a clean and consistent way. +* All Linaro software - whether development tools or Linux based projects - is open source, on its way upstream and easily available from the website. +* Linaro has created an effective, collaborative and open engineering organisation by bringing talent together from all over the world to fix industry wide problems such as low-level software fragmentation and under-investment in open source projects. +* Linaro has worked on the latest releases of tools, kernel and middleware enabling optimized software for the new range of high performance Cortex-A9 based chips that are entering the market. +* The focus of Linaro is to reduce the cycle time required to develop Linux based products that fully utilise the latest SoC solutions. + +Linaro showcases progress and momentum for its collaborative embedded Linux organisation at Arm Techcon in Santa Clara. In a demonstration that shows Linaro delivering on its mission to make open source development easier, quicker and more optimized, Samsung, ST-Ericsson and TI will be showcasing multiple open source distributions, running on multiple SoCs using code or tools that have been enhanced by Linaro. 
+ +"Linaro can help change the embedded open source world for the better by reducing non-value-add fragmentation and creating a place where the Arm partnership can collaborate to advance open source," said Leonard Tsai, vice president of Compal Innovation Design & Technology. "Everyone will be winners with a diverse range of great connected products that perform better, take less power and are quicker to market." + +### Linaro delivers on launch promises + +At the June Computex launch of Linaro, it was stated that Linaro would be building a team of approximately 100 expert software developers. As of the 10th of November, six months after launch, Linaro has ramped its engineering to over 70 developers and continues to add new talent every month. These open source developers have been working on two key areas: improving development tools and consolidating Linux kernel SoC support for the latest Arm Cortex-A9 and Cortex-A8 based chips. In accordance with its open engineering principles, all Working Groups and platform engineering has been done in the open and is available for inspection on the developer wiki. The output of this work enables a consolidation of the best Linux on Arm and the best open source development tools for Armv7A to be available to everyone. Linaro has added Community resources to its website for those that want to get involved or align engineering without the strategic commitment of Core or Club membership:[www.linaro.org/community](/membership/). + +### Linaro gains momentum + +The rapid increase in open source developers working for Linaro has enabled an expansion of Working Groups for the second engineering cycle (11.05). The addition of three new Working Groups will make it easier to create stunning products with advanced multimedia, graphics and exceptional battery life. To strengthen Linaro's governance, TI joins Arm on the board along with IBM in representation of all Club Members. The new board have recently selected George Grey as CEO to add further experience to Linaro's executive team. Linaro announces today that it is creating a new Advisory group for software distribution owners, so that they have a formal channel to discuss their needs and wants with the TSC who set the Linaro engineering requirements. + +### Linaro demonstrates easier, quicker and more optimized open source\*\*\*\* + +In a significant demonstration at Techcon on 10th of November, Samsung, ST-Ericsson and TI will showcase 3 different software distributions on 3 different Cortex-A9 based SoCs using software or tools that have been enhanced by Linaro's developers + +"These demonstrations show the latest Arm based SoCs running multiple distributions and built with software or tools that have benefited from Linaro's aligned engineering," said George Grey, CEO of Linaro. "By providing the best open source tools and software and helping to enable them on the most advanced Cortex-A9 chips, we are helping to unify and accelerate open source development." + +In the second cycle Linaro will build on this momentum by: + +* Investing in more open source projects relating to graphics, multimedia and power management +* Expanding the number of SoCs which support this software and support for leading edge cores +* Announcing distribution owners as Advisors to Linaro +* Announcing a new member of the TSC + +Companies interested in joining this collaborative venture are invited to discuss membership with the Linaro executive team. 
For more information on the company or to download software and tools, visit [www.linaro.org](/). + +### Background + +The biggest trend enabling advanced connected, consumer electronics devices is open source software. Connected devices such as smartphones, mobile computing, DTV, STB and infotainment devices are built in their billions every year and are increasingly turning to Linux. Developing embedded devices based on Linux and the many open source projects that make up a typical open source software platform has been challenging for OEMs and ODMs. A mixture of software fragmentation and under investment in open source projects has slowed down the development. Linaro was created to provide a focal point for embedded Linux developers, reducing low level fragmentation and providing aligned investment in open source projects to accelerate open source development for consumer devices. + +### Industry quotes + +"The Linux Foundation welcomes the increase in upstream investment that Linaro has made on behalf of the Arm community," said Jim Zemlin, executive director of Linux Foundation. "The collaborative engineering work Linaro is doing in the Linux kernel will help accelerate innovation in open source." + +"LiMo Foundation commends the fast pace of progress within Linaro and the sustained expansion of its engineering team which is realising reference implementations from multiple silicon vendors," said Morgan Gillis, executive director of LiMo Foundation. "We will continue to align with Linaro's activities through active participation with a view to produce further efficiencies for companies commercialising on the LiMo Platform." + +"Linaro has ramped up its collaborative engineering organisation and is already delivering on its mission to make it easier and quicker to develop optimized open source devices," said Mike Muller, CTO of Arm. "This is the best place for the Arm partnership to work together to deliver the best Linux and open source on the latest Cortex-A class processors." + +"Freescale is committed to simplifying the investment our customers make to bring an i.MX based product to market, and Linaro is a key part of this task," said Bernd Lienhard, vice president and general manager of Freescale's Multimedia Applications Division. "We are committed to Linaro's support for Android and other Linux distributions." + +"IBM is pleased to see that the resources and open source expertise committed by partners to the Linaro organization are already yielding results," said Mark Ireland, vice president of semiconductor products and services at IBM. "Open source based product development will be easier and quicker for OEMs and ODMs as a result of Linaro's aligned engineering and efforts to reduce low-level fragmentation." + +"During the first six months of operations, our engineers have been participating in several Linaro Working Groups and we already see multiple benefits from the initiative," said Enrica Filippi, head of software ecosystem and strategy at ST-Ericsson. "We have gotten access to a network of highly-skilled developers from the open source communities, together with high-quality software patches and tools, enabling our code to move faster upstream and into the Linux kernel source. Linaro is helping both OEMs and semiconductor companies to shorten the development time of Linux devices based on Arm CPUs." 
+ +"TI is thrilled to work hand-in-hand with the Linaro community to accelerate open source initiatives, and to align on the importance of reducing software fragmentation across the industry," said Ari Rauch, senior director of software and system engineering, OMAP™ products and wireless business unit at TI. "We believe that open source Linux - married with high-performance Arm®-based architectures like TI's OMAP 4 processors - will fuel outstanding possibilities for the next-generation of connected devices." + +"As a long time supporter of embedded Linux for Arm processors, MontaVista is excited to be working with Linaro as a commercialization partner," said Jim Ready, CTO of MontaVista Software. "Our goal is to integrate Linaro's open source projects into MontaVista Linux in order to provide commercial support and services for the Arm ecosystem and device manufacturers." diff --git a/src/content/blogs/linaro-joins-the-industrial-internet-consortium.mdx b/src/content/blogs/linaro-joins-the-industrial-internet-consortium.mdx new file mode 100644 index 0000000..03a8ba9 --- /dev/null +++ b/src/content/blogs/linaro-joins-the-industrial-internet-consortium.mdx @@ -0,0 +1,28 @@ +--- +title: Linaro joins the Industrial Internet Consortium +author: linaro +date: 2018-09-11T09:00:00.000Z +description: Linaro joins the Industrial Internet Consortium as a member. +tags: + - iot-embedded + - arm +related: [] + +--- + +Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, today announced that it has joined the Industrial Internet Consortium® (IIC™), the world’s leading organization transforming business and society by accelerating the adoption of the Industrial Internet of Things (IIoT). + +Many IIC members are participating in advanced testbeds across many different industries. Linaro is eager to share its expertise about IIoT, Fog and Edge software with IIC members as part of the consortium. + +Linaro’s new Edge & Fog computing group (LEDGE) is being formed to bring together companies to collaboratively work on open source software engineering in this area and it is expected - as Linaro has demonstrated in other areas - that combining engineering talent from member companies will accelerate development in a way no single company can achieve on its own. Linaro will also contribute software architecture expertise into working groups such as trustworthiness and edge computing. + +“Open source software for Edge and Fog computing is a growing area,” said Dr. Richard Soley, Executive Director, IIC. “The IIC is excited to welcome Linaro as a member and is looking forward to Linaro sharing its unique expertise about open source software.” + +“Collaborating on open-source software and enabling standards for industrial networking, like TSN, are key ways our company is driving embedded processing innovation for industrial applications,” said Pekka Varis, catalog processors technologist at Texas Instruments. “As a member of IIC and a founding member of Linaro, we have seen how both industry standards and open-source software can enable engineers to develop solutions to new challenges in Industrial IoT more quickly.” + +“Linaro, as a leader in collaborative open source engineering, perfectly complements the IIC approach,” said Francois Ozog, Director of the Linaro Edge & Fog Computing Group. 
“On one side, Linaro can leverage the experience IIC has developed in testbeds and assess requirements to develop missing software components or best practices in the most efficient manner; on the other side, Linaro can provide IIC members with valuable hardware and software experience to find innovative and optimal ways to find solutions to technological challenges.” + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96Boards.org/). diff --git a/src/content/blogs/linaro-launches-96boards-ai-platform.mdx b/src/content/blogs/linaro-launches-96boards-ai-platform.mdx new file mode 100644 index 0000000..b73cd49 --- /dev/null +++ b/src/content/blogs/linaro-launches-96boards-ai-platform.mdx @@ -0,0 +1,53 @@ +--- +keywords: Linaro, Connect, HKG18, 96Boards, Artificial Intelligence, AI, Platforms +title: Linaro Announces Launch of 96Boards AI Platform +description: Linaro announces 96Boards.ai and availability of a range of + compatible member 96Boards platforms for developers. Read more here. +image: linaro-website/images/blog/96boards-ai +tags: + - linaro-connect + - ai-ml +author: linaro +published: true +date: 2018-03-19T00:00:00.000Z +related: [] + +--- + +\[Hong Kong, China, 19 March 2018] Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, today announced [96Boards.ai](https://www.96boards.ai) and availability of a range of compatible member 96Boards platforms for developers working on AI hardware and software solutions. + +“The Linaro Connect keynote demonstration of high performance real-time computer vision and intelligent audio processing, supported by machine learning algorithms and deep learning technology, shows how far developers can now go with readily available 96Boards AI development platforms based on the latest SoCs” said George Grey, Linaro CEO. “What’s most impressive is the choice of platforms that developers can use today to innovate and produce their own AI enabled products.” + +Linaro members HiSilicon, Socionext, and Xilinx, and new 96Boards steering committee member Rockchip have all released 96Boards development products with AI hardware capabilities. Additionally, Qualcomm Technologies, Inc., plans to join 96Boards.ai with the DragonBoard™ 820c development board, featuring the Qualcomm® Snapdragon™ 820E embedded platform. Together, these platforms offer an unrivalled choice of heterogeneous compute solutions for AI development. 
These products offer different combinations of CPU, GPU, NPU, DSP and FPGA with both on-die and independent acceleration supporting hard-coded, optimized performance as well as programmable flexibility. + +The 96Boards.ai hardware platforms offer access to the latest Arm-Powered accelerated AI hardware and vendor supplied software for Android and Linux. Linaro is planning a collaborative AI software project to interface industry standard AI platforms and libraries to the multiple hardware IP solutions being offered by industry leading vendors. The goal is to accelerate innovation by removing the need for redundant and fragmented engineering effort on each proprietary solution. By working together on shared, open source software foundations, more resources can then be applied to optimizing use case development on each vendor’s value-added hardware IP. + +“In the last three years, 96Boards has established open hardware standards around which the ecosystem is creating a broad range of compatible, standardized compute platforms that provide access to the latest SoC technology,” said Yang Zhang, 96Boards Director and Chief Scientist of the Chinese Academy of Sciences AIRIA. “96Boards.ai brings together the latest SoCs with AI acceleration capability from multiple world-leading silicon vendors to provide the most advanced development and prototyping platforms for intelligent products. This new open AI platform will empower algorithm developers, researchers, product designers and SoC vendors to focus on their own, unique value-added differentiation.” + +In addition to the choice of AI compute platforms, the 96Boards ecosystem offers a range of mezzanine and peripheral products to enable advanced audio, video and sensor interfaces. With the standard form factor and flexible interface options, compatible sensors will work with all conforming 96Boards platforms. This standardization enables developers to quickly pick and choose sensors and compute platforms for a broad range of embedded AI applications, from personal assistants and automotive driver assistance to smart cities, forensic investigation and drug discovery. + +“As an inaugural member of the 96Boards.ai community we are excited to deliver to customers performance that is orders of magnitude higher than conventional microprocessors,” said Tomas Evensen, Chief Technology Officer of Embedded Software Xilinx. “As the world’s leading supplier of field programmable gate arrays (FPGAs), we continue to make great strides in all facets of AI acceleration. Xilinx’s unique heterogeneous combination of programmable elements, including; applications processors, real-time processors, digital signal processing elements and of course the FPGA logic fabric, gives us a unique advantage in bringing an unprecedented amount of performance and flexibility to the 96Boards.ai community. The new Ultra96 board, available from Avnet Electronics, is the flagship low-cost vehicle to demonstrate those benefits.” + +“Socionext has been working with Linaro on an AI enabled 96Boards solution,” said Shuichi Yamane, Sub-leader of S3 Project, at Socionext. 
“Through the activities of the 96Boards community, we expect an open source platform that can fully utilize the features of Socionext AI solution will help to expand AI use cases significantly in the future.” + +The DragonBoard 820c development board supports a wide range of interfaces and is ideal for prototyping complex embedded computing applications including for VR and AR, machine vision and artificial intelligence for commercial drones and robots. The Dragonboard 820c features the Snapdragon 820E embedded platform with a 64-bit Armv8 -compliant quad-core Qualcomm® Kryo™ CPU, Qualcomm® Adreno™ 530 GPU and Qualcomm® Hexagon™ 680 DSP. Qualcomm Technologies aims to provide support for Linux on its Snapdragon Neural Processing Engine SDK in the second half of 2018 for use on DragonBoard 820c, so developers can take advantage of the Snapdragon 820E heterogeneous hardware architecture to accelerate on-device AI applications. + +“It is a great honour for HiKey970 to join 96Boards.ai as a popular development platform for AI application development. HiKey970 is the third generation of HiKey series in 96boards. It is the world’s leading AI-enabled development platform highlighting with powerful computing power, richer hardware interfaces, and supporting mainstream operating systems and AI stacks. The HiKey970 integrates Huawei's HiAI framework and other mainstream neural network frameworks, supporting both CPU and GPU AI calculations and NPU-based neural network computing hardware acceleration, which can greatly help on-device AI development.” said Eric Zhou, product marketing director of Huawei wireless terminal chipset BU. “We hope to bring developers an easy-to-use AI development platform. Much appreciation to our partners Linaro, Arm, Hoperun, LeMaker, and all the engineers, without their effort, we can’t achieve such a challenging task.” + +“As a world-leading smart system technology, service and solution provider, ThunderSoft is always the believer in open standards and collaborative innovation. That’s why we are committed to build AI kit and solutions around 96Boards Open AI platforms. With our years of expertise and experience in smart system development, Thundersoft will enable the next generation of embedded AI innovation based on 96Boards.ai platforms.” said Pengcheng Zou, CTO of ThunderSoft. + +“We're excited to join Linaro as a 96Boards steering committee member, Rockchip is very committed to the open society, and Rock960 will be a very competitive AI platform for the community. We're looking forward to developing a great AI product base on that,” said Feng Chen, Chief Marketing Officer of Rockchip. + +“Hoperun is a 96Boards steering committee member and Manufacturing partner, launching partner of the HiKey970, 96Boards.ai provider and promoter, committed to build AI kit and solutions around the 96Boards Open AI platform,” said Hoperun CEO Chen Bin. + +96Boards is Linaro’s initiative to build a single software and hardware community across cost-effective development boards based on Arm technology. + +## About Linaro + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. 
Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low-level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [https://www.96boards.ai/](https://www.96boards.ai/), [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96Boards.org/). + +*Qualcomm, Snapdragon, Adreno, Hexagon, Kryo and DragonBoard are trademarks of Qualcomm Incorporated, registered in the United States and other countries. Other products or brand names may be trademarks or registered trademarks of their respective owners.* + +*Qualcomm Snapdragon, Qualcomm Kryo, Qualcomm Adreno, and Qualcomm Hexagon are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* diff --git a/src/content/blogs/linaro-launches-android-build-portal-engineering-group-pages.mdx b/src/content/blogs/linaro-launches-android-build-portal-engineering-group-pages.mdx new file mode 100644 index 0000000..fdb6128 --- /dev/null +++ b/src/content/blogs/linaro-launches-android-build-portal-engineering-group-pages.mdx @@ -0,0 +1,30 @@ +--- +author: linaro +date: 2013-06-19T11:26:14.000Z +description: CAMBRIDGE, UK - 19 JUN 2013 +link: /news/linaro-launches-android-build-portal-engineering-group-pages/ +title: Linaro Launches Android Build Portal and Engineering Group Pages +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 19 JUN 2013 + +Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, today announced its new [Android build portal](/client-devices/) and [Engineering Group pages](/core-technologies/). + +The [Android build portal](/client-devices/) is designed to make it easier to share news about the latest Android developments at Linaro. The site includes sections with access to key Android information, including the latest news, videos and blogs related to Linaro’s Android work. It also includes important links to setting up a build and deploying images onto the Linaro members’ platforms. The [Engineering Group pages](/core-technologies/) provide deep links into the activities of Linaro’s Engineering Groups. + +Individuals who want to get started with Android builds from Linaro will find the latest releases for members’ hardware and community builds for other devices on the portal. This material can also be used to produce custom builds of Android or as a starting point for engineers who want to contribute to Linaro’s Android work. In addition to the expanded listing of news and builds, the new portal gives access to the daily IRC chats so that the community can easily be a part of the development as it is happening. This new portal is an important part of Linaro’s commitment to leading the development of Linux on Arm by communicating openly and making it easier to share information about Linaro’s Android builds with the community. 
[Visit the new Linaro Android build portal.](/client-devices/) + +Since Linaro was founded in June 2010, collaborative engineering work has been done in engineering groups focused on the [Kernel](/core-technologies/toolchain/), [Toolchain](/core-technologies/toolchain/), [Graphics](/client-devices/), [Power Management](/core-technologies/toolchain/) and Platform Engineering. More recently, new vertically-focused segment groups have been formed and Linaro now has two of these: the [Linaro Enterprise Group (LEG)](/cloud-computing-and-servers/) and Linaro Networking Group (LNG). The new Engineering Group pages provide a single point of access to all the web locations that each of the groups uses, including Linaro Wiki links to meeting minutes and white papers and IRC channels for daily discussion. + +**Join us at Linaro Connect** + +Linaro Connect is held every three to four months to bring the Linux on Arm community together to work on the latest system-on-chip (SoC) developments, plan new engineering efforts and hold engineering hacking sessions. These events give the Linux community an opportunity to be a part of the Linaro team and help to define the Arm tools, Linux kernels and builds of key Linux distributions including Android and Ubuntu on member SoCs. The next Linaro Connect will be July 8-12th in Dublin, Ireland. + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 140 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. diff --git a/src/content/blogs/linaro-launches-opendataplane-odp-project-deliver-open-source-cross-platform-interoperability-networking-platforms.mdx b/src/content/blogs/linaro-launches-opendataplane-odp-project-deliver-open-source-cross-platform-interoperability-networking-platforms.mdx new file mode 100644 index 0000000..88ae4ea --- /dev/null +++ b/src/content/blogs/linaro-launches-opendataplane-odp-project-deliver-open-source-cross-platform-interoperability-networking-platforms.mdx @@ -0,0 +1,87 @@ +--- +excerpt: Industry leaders in the Linaro Networking Group (LNG) are collaborating + to develop and host an open standard application programming interface (API) + for data plane applications on www.opendataplane.org +title: Linaro launches OpenDataPlane™ (ODP) project to deliver open-source, + cross-platform interoperability for networking platforms +description: The Linaro Networking Group are collaborating to develop an open + standard application programming interface for data plane applications. Read + more here! 
+image: linaro-website/images/blog/30921188158_953bca1c9f_k +author: linaro +date: 2013-10-29T12:31:46.000Z +link: /news/linaro-launches-opendataplane-odp-project-deliver-open-source-cross-platform-interoperability-networking-platforms/ +tags: [] +related: [] + +--- + +SANTA CLARA, US - 29 OCT 2013 + +## Industry leaders in the Linaro Networking Group (LNG) are collaborating to develop and host an open standard application programming interface (API) for data plane applications on www.opendataplane.org + +Linaro, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced the launch of OpenDataPlane (ODP) and associated website [www.opendataplane.org](http://www.opendataplane.org/). + +The ODP deliverable will be a data plane application programming environment designed to enable software portability between networking SoCs, regardless of the underlying instruction set architecture (ISA). The 12 Linaro Networking Group (LNG) member companies represent a cross section of networking original equipment manufacturers (OEMs), system on chip (SoC) companies and independent software vendors (ISVs), collectively having stakes in common data plane programming models for all the leading networking processor architectures. + +“ODP is a good example of what the Linaro vertical segment groups are designed to achieve,” said George Grey, CEO of Linaro. “A group of industry stakeholders identify a strong demand for a common software model optimized across multiple hardware platforms. They then combine efforts to minimize redundant engineering effort and fragmentation, and accelerate an open source solution offering the flexibility, performance, scalability, interoperability and reliability that the industry requires.” + +The ODP environment will consist of a common application programming interface (API) software layer, configuration files, services and utilities. It will support multiple underlying implementations, ranging from pure software to those embracing and extending existing vendor-specific software development kits (SDKs) highly-optimised for the underlying hardware. This follows standards such as OpenCL and OpenGL that just define the user level API and not the implementation. Networking applications will be written directly on top of the ODP environment to provide platform interoperability. + +The data explosion in communication networks is driving the need for innovation and collaboration among industry stakeholders. Many new industry initiatives such as network function virtualization (NFV) have emerged, but much of this work is concentrated at the management, orchestration, services and control layers of the network. Since these layers will overlay onto the optimized data plane or forwarding plane hardware from different vendors, each with their own legacy SDK tuned to their individual platform, it is essential to develop a common interface across different SoC designs and silicon architectures. ODP will provide this with the first truly open source, cross platform solution designed and developed by a significant cross section of networking leaders, all of whom are members of the Linaro Networking Group. + +The ODP standard will be developed in the open. The project API will adopt a BSD 3-Clause license so that companies can produce derivative applications based on ODP, without compromising their unique intellectual property. Furthermore this will allow vendors to support ODP across multiple architectures. 
Such support is already planned by multiple software companies and vendors. Once published, the project maintainers will seek feedback and contributions from the broad community to evolve and refine the software to meet the widest possible industry needs. + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 180 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial grade software, Linaro executes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit[](/). + +*Media contact:* Steve Taylor, media@linaro.org + +ODP Supporting Quotes From LNG Members + +**Arm** + +“We view this initiative for software compatibility at the network device layer as a fundamental building block to delivering scalable networks incorporating services flexibility,” said Ian Drew, SVP Marketing Arm. “Network operators want and need choice and there is no better forum of network stakeholders focused at this crucial layer of the optimized next generation network to provide the best possible solution.” + +*About Arm:* Arm designs the technology that lies at the heart of advanced digital products, from wireless, networking and consumer entertainment solutions to imaging, automotive, security and storage devices. Arm’s comprehensive product offering includes 32-bit RISC microprocessors, graphics processors, video engines, enabling software, cell libraries, embedded memories, high-speed connectivity products, peripherals and development tools. Combined with comprehensive design services, training, support and maintenance, and the company’s broad Partner community, they provide a total system solution that offers a fast, reliable path to market for leading electronics companies. See [www.arm.com](http://www.arm.com/) for more information. + +**Cavium** + +“For over 8 years Cavium customers have benefitted from our comprehensive fast-path development environment and APIs that seamlessly integrate our world-leading hardware acceleration technology with our Linux compatible data plane acceleration software,” said Imran Badr, VP of Software Engineering, Cavium. “We support and applaud Linaro’s leadership in spearheading the OpenDataPlane (ODP) API that will modularize and standardize Data Plane acceleration software across multiple vendors and architectures. This standardization will foster and accelerate the pace of innovation available to enterprise and service providers. With ODP Cavium will provide its customers with an industry standard application layer API that will provide leading networking and compute performance by virtualizing and abstracting our unique hardware acceleration across our MIPS64 and Armv8 processor families.” + +*About Cavium:* Cavium is a leading provider of highly integrated semiconductor products that enable intelligent processing in networking, communications and the digital home. 
Cavium offers a broad portfolio of integrated, software compatible processors ranging in performance from 10 Mbps to over 100 Gbps that enable secure, intelligent functionality in enterprise, data-center, broadband/consumer and access & service provider equipment. Cavium’s processors are supported by ecosystem partners that provide operating systems, tool support, reference designs and other services. Cavium’s principal offices are in San Jose, California with design team locations in California, Massachusetts, India and China. For more information, please visit:[www.cavium.com](https://www.marvell.com/). + +**ENEA** + +“As a world leading operating systems solutions provider for Networking, we see OpenDataPlane as a much awaited step forward, driving speed and innovation in the IP application and transport domain”, said Tobias Lindquist, CTO, Enea. “ODP will be a business enabler for us, where we can continue to compete with new offers, granting our customers a greater re-use of software resources and a simpler migration between hardware platforms.” + +*About Enea:* Enea is a global vendor of Linux and Real-time operating system solutions including middleware, tools, protocols and services. The company is a world leader in developing software platforms for communication-driven products in multiple verticals, with extreme demands on high-availability and performance. For more information, please visit: [www.enea.com](https://www.enea.com/). + +**LSI** + +"LSI is a leader in developing networking products to support our customer’s needs for real time, high performance semiconductor products for latency sensitive applications,“ said Cristina Rodriguez, Director of Software Engineering, LSI.  “LSI has been driving the Linaro Networking Group’s Open DataPlane (ODP) initiative to provide a level of abstraction that will enable a seamless migration across divergent CPU architectures while striving to retain focus on the fundamental use cases that are vital for our customers to meet their deployment goals." + +*About LSI:* LSI Corporation designs semiconductors and software that accelerate storage and networking in datacenters, mobile networks and client computing. Our technology is the intelligence critical to enhanced application performance, and is applied in solutions created in collaboration with our partners. More information is available at [www.lsi.com](http://www.lsi.com/). + +**MontaVista** + +“MontaVista is a leader in providing virtualized platforms for the network market with our market leading Carrier Grade Edition product. OpenDataPlane project provides a powerful and common way to present interfaces to I/O and other SoC capabilities across multiple architectures while maintaining the performance requirements of next generation network devices,” said Sanjay Raina, President of MontaVista. “MontaVista is collaborating with Linaro Network Group (LNG) to build the OpenDataPlane solution and will be integrating the technology into our market leading multi-architecture Carrier Grade Edition 7 platform.” + +*About MontaVista:* MontaVista Software, LLC, a wholly owned subsidiary of Cavium, Inc., is a leader in embedded Linux commercialization. For over 10 years, MontaVista has been helping embedded developers get the most out of open source by adding commercial quality, integration, hardware enablement, expert support, and the resources of the MontaVista development community. 
Because MontaVista customers enjoy faster time to market, more competitive device functionality, and lower total cost, more devices have been deployed with MontaVista than with any other Linux. To learn more, please visit http://www.mvista.com/. + +**Nokia Solutions and Networks** + +“According to NSN Technology Vision 2020 mobile networks will be required to deliver one Gigabyte of personalized data per user per day profitably to support the ever increasing consumer needs with a host of mobile applications. “To realize this vision, we need to develop new capabilities and technologies. OpenDataPlane is a big step towards our networking vision as it brings together all networking SoCs enabling portability, high capacity and power efficient implementation,” says Jarmo Hillo, Head of Processor Technology at Nokia Solutions and Networks. + +*About Nokia Solutions and Networks:* Nokia Solutions and Networks is the world’s specialist in mobile broadband. From the first ever call on GSM, to the first call on LTE, we operate at the forefront of each generation of mobile technology. Our global experts invent the new capabilities our customers need in their networks. We provide the world’s most efficient mobile networks, the intelligence to maximize the value of those networks, and the services to make it all work seamlessly. + +With headquarters in Espoo, Finland, we operate in over 120 countries and had net sales of approximately 13.4 billion euros in 2012. NSN is wholly owned by Nokia Corporation. + +**Texas Instruments (TI)** + +“TI is excited to be a part of the development of ODP. It gives customers an open and standardized approach to software making it easier to develop with.” said Pekka Varis, chief technology officer for DSP and Multicore at TI. “ODP is a step in the right direction for customers needing high performance networking software.” + +*About TI:* Texas Instruments semiconductor innovations help 90,000 customers unlock the possibilities of the world as it could be – smarter, safer, greener, healthier and more fun. Our commitment to building a better future is ingrained in everything we do – from the responsible manufacturing of our semiconductors, to caring for our employees, to giving back inside our communities. This is just the beginning of our story. Learn more at [www.ti.com](http://www.ti.com/). diff --git a/src/content/blogs/linaro-makes-snowball-shine.mdx b/src/content/blogs/linaro-makes-snowball-shine.mdx new file mode 100644 index 0000000..e581c23 --- /dev/null +++ b/src/content/blogs/linaro-makes-snowball-shine.mdx @@ -0,0 +1,22 @@ +--- +author: linaro +date: 2011-02-15T12:10:12.000Z +description: CAMBRIDGE, UK - 15 FEB 2011 +link: /news/linaro-makes-snowball-shine/ +title: Linaro Makes Snowball Shine +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 15 FEB 2011 + +## Linaro provides optimized base of software and tools for new low cost board (Snowball) using ST-Ericsson's dual-core Arm® CortexTM- A9 based AP9500 chipset. + +Linaro welcomes the news that the open source community will be able to buy low cost, high performance Snowball development boards using ST-Ericsson's AP9500 chipset and run free open source software and tools that have been optimized by Linaro. + +Software developers now have greater access to the most advanced application processors that can provide the performance of a PC in the power footprint of mobile. 
The Snowball Software Development Kit and Product Development Kit significantly extends the choice for software developers who want to create great code and products on a platform that costs less than a netbook, but with greater performance and longer battery life. The Snowball development boards combine powerful dual core Arm Cortex-A9 and Arm Mali™ GPU processors with open source software that has been optimized by Linaro. + +Linaro exists to enable the latest open source software on the latest system on chips and help to accelerate innovation among software developers. The current wave of "always-connected, always-on" devices are increasingly turning to Linux and highly integrated SoCs to achieve the performance and battery life consumers demand. + +"Developers will be able to come to the Linaro website and download the latest kernel, board support package and tools and know they are getting a great foundation to base their code or product on." said Stephen Doel, COO, Linaro.  "Our evaluation builds and validation test farm will ensure that there is a quick way to utilise our engineering directly with the major software distributions on low cost boards such as the Snowball SDK and PDK." diff --git a/src/content/blogs/linaro-names-george-grey-ceo.mdx b/src/content/blogs/linaro-names-george-grey-ceo.mdx new file mode 100644 index 0000000..4091570 --- /dev/null +++ b/src/content/blogs/linaro-names-george-grey-ceo.mdx @@ -0,0 +1,27 @@ +--- +title: Linaro names George Grey as CEO +description: CAMBRIDGE, UK - 26 OCT 2010 +image: linaro-website/images/blog/george_grey_hi_res +author: linaro +date: 2010-10-26T11:01:05.000Z +link: /news/linaro-names-george-grey-ceo/ +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 26 OCT 2010 + +Linaro, at its inaugural developer conference, announced that its board of directors has named George Grey as the company's chief executive officer. Grey's 27-year [career](https://www.linaro.org/careers/) has included a number of software and hardware high-tech companies in Europe and the U.S. Prior to this role, he was founder and CEO of Mobicious, a mobile content start-up, and president and COO of SavaJe, a Java-based smart phone operating system. + +Linaro was created to be an independent company to address the challenges associated with accelerating open source software (OSS) on systems on chip. (SoC). It was launched by its founding companies - Arm, IBM, Freescale, Samsung, ST-Ericsson and Texas Instruments - at Computex in June, with an interim executive leadership team provided by Arm.  Acting Executive Director Tom Lantzsch will resign from his role immediately and become an advisor to the company, while Ben Cade will remain as an executive director, sharing the office with Grey, until year end. + +Linaro is meeting all of its operational goals since formation. In less than five months, Linaro has grown quickly from 10 engineers to 70, and is on track toward its goal of 100 employees by year end. The first product release will occur on time, and it will be announced on Nov. 10 at Arm TechCon in Santa Clara, Calif., supported by presentations, partner demonstrations and a webinar. Currently Linaro is hosting its first weeklong Developer Summit in Orlando, Fla. + +"We are ecstatic to have been able to recruit someone of George's talent," said Tom Lantzsch, departing executive director of Linaro. "He has a wealth of experience managing software companies that will be critical for Linaro's next phase of success. 
I am returning to Arm knowing Linaro is in competent hands." + +"The board were unanimous in their selection of George, based on his track record of leading and growing technology companies, his hands-on approach to leadership and personal qualities that align with the organization's ethos," said Ben Cade, executive director, Linaro. "I am looking forward to working beside George during this transition period to ensure the fantastic progress we have made continues." + +"Linaro is already contributing to improving tools, aligning SoC support, consolidating the Arm ecosystem around a common base and working directly in upstream projects," said Grey. "I look forward to working with Linaro's members, management, engineering team, and partners to accelerate this further and to deliver on our mission." + +Linaro will be presenting on the accomplishments of the first engineering cycle and plans for the second at the following conferences: Embedded Linux Conference Europe (ELCE), Cambridge UK; LPC, Cambridge, Mass.; Arm Techcon, Santa Clara, Calif.; and the Arm Asia Technical conferences. diff --git a/src/content/blogs/linaro-participate-open-compute-summit.mdx b/src/content/blogs/linaro-participate-open-compute-summit.mdx new file mode 100644 index 0000000..24b9150 --- /dev/null +++ b/src/content/blogs/linaro-participate-open-compute-summit.mdx @@ -0,0 +1,28 @@ +--- +author: linaro +date: 2013-01-10T12:22:58.000Z +description: CAMBRIDGE, UK - 10 JAN 2013 +link: /news/linaro-participate-open-compute-summit/ +title: Linaro to Participate in Open Compute Summit +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 10 JAN 2013 + +Linaro, the not-for-profit engineering organization developing open source software for the Arm architecture, announced that they will be participating in the Open Compute Summit being held January 16 -17, 2013 at the Santa Clara Convention Center. The Open Compute Summit is an International conference sponsored by the Open Compute Project and is focused on companies and projects in the industry that are collectively developing the most efficient computing infrastructure possible. + +Linaro is a sponsor of the event and will be both exhibiting and presenting at the event. The time and topic of their talk is: + +* January 16th during the industry luminaries session from 3:30pm to 6:00pm + + * George Grey, CEO of Linaro, will present "Building interoperability in low-power SOCs" + +Linaro will also have a booth at the event and be presenting and answering questions regarding Linaro's work in consolidating and optimizing open source software for the Arm architecture. + +#### **About Linaro:** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 120 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro's work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro's engineering work is open to all online. 
diff --git a/src/content/blogs/linaro-partners-samsung-ecosystem-deliver-exciting-new-low-cost-board-origen-open-source-developers.mdx b/src/content/blogs/linaro-partners-samsung-ecosystem-deliver-exciting-new-low-cost-board-origen-open-source-developers.mdx new file mode 100644 index 0000000..7a18688 --- /dev/null +++ b/src/content/blogs/linaro-partners-samsung-ecosystem-deliver-exciting-new-low-cost-board-origen-open-source-developers.mdx @@ -0,0 +1,27 @@ +--- +author: linaro +date: 2011-05-30T11:12:47.000Z +description: TAIPEI, TAIWAN - 30 MAY 2011 +link: /news/linaro-partners-samsung-ecosystem-deliver-exciting-new-low-cost-board-origen-open-source-developers/ +title: Linaro partners with Samsung ecosystem to deliver exciting new low cost + board ‘Origen’ to Open Source developers +tags: [] +related: [] + +--- + +TAIPEI, TAIWAN - 30 MAY 2011 + +## Linaro provides optimized base of software and tools for highest performance low cost Arm board 'Origen' using Samsung's dual-core Arm® Cortex-A9 based Exynos 4210 chipset + +Linaro welcomes the news that developers will be able to buy low cost, high performance 'Origen' development boards using Samsung's Exynos 4210 chipset, and run free open source software and tools that have been optimized by Linaro. + +Software developers now have low cost access to the high performance Exynos mobile computing platform that packs features such as the latest multi-core Cortex-A9 CPU and multi-core Mali400 GPU, with 1GB of high end DDR3 memory. This board will be popular with developers of tablets, smartphones and other connected screens where leading edge performance, stunning graphics and the ability to drive HD displays is important. Linaro will provide source code downloads of Linaro Evaluation Builds of Android and Ubuntu from its website. The Linaro Evaluation Builds will be built with the latest stable Linux kernel and toolchain, and will deliver leading edge performance on a sub-$200 board. The provision of product quality software by Linaro will reduce time to market and ease the process of product creation using open source software and tools. + +"The Origen board and Linaro's provision of software and tools makes it easier, faster and less expensive for developers and companies to develop high-end embedded Linux products," said George Grey, CEO Linaro. "Origen provides outstanding performance for tablets, smartphones and a wide range of embedded Linux products, and we are excited to be offering optimized open source components and builds of Android and Ubuntu for this development platform." + +"The new low cost 'Origen' board and consolidated software packages from Linaro are expected to enable easy and quick development of mobile platforms incorporating scalability of rich-features on a cutting-edge Exynos 4210 application processor," said Dojun Rhee, vice president, System LSI Marketing, Samsung Electronics. "We are confident that our customers and the open source community will be able to develop their products in a timely manner through the innovative Exynos platform." + +The 'Origen' integrates mobile platform orientated features such as interfaces for HDMI, SD card, WiFi, Bluetooth® wireless technology, Stereo audio, LCD, JTAG debug and Camera. To enable future upgrade options the Exynos processor is mounted on a small daughter board together with high bandwidth DDR3 memory. 
The 'Origen' board will be available through its manufacturer at [www.insignal.co.kr](http://www.insignal.co.kr/) + +Linaro exists to enable the latest open source software on advanced Arm SoCs, and to help to accelerate innovation among software developers. The current wave of "always connected, always on" devices are increasingly turning to Linux and highly integrated SoCs to achieve the performance and battery life consumers demand. For more information on the company, access to software and tools, and information on the community and open engineering, visit [www.linaro.org](/) diff --git a/src/content/blogs/linaro-q1-2011-update.mdx b/src/content/blogs/linaro-q1-2011-update.mdx new file mode 100644 index 0000000..ad53f17 --- /dev/null +++ b/src/content/blogs/linaro-q1-2011-update.mdx @@ -0,0 +1,14 @@ +--- +author: linaro +date: 2011-01-21T12:09:37.000Z +description: CAMBRIDGE, UK - 21 JAN 2011 +link: /news/linaro-q1-2011-update/ +title: Linaro Q1 2011 Update +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 21 JAN 2011 + +Learn about the latest developments and product plans. [Download Q1 update]() diff --git a/src/content/blogs/linaro-q3-update.mdx b/src/content/blogs/linaro-q3-update.mdx new file mode 100644 index 0000000..2e7a157 --- /dev/null +++ b/src/content/blogs/linaro-q3-update.mdx @@ -0,0 +1,16 @@ +--- +author: linaro +date: 2010-08-24T10:52:43.000Z +description: CAMBRIDGE, UK - 24 AUG 2010 +link: /news/linaro-q3-update/ +title: Linaro Q3 update +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 24 AUG 2010 + +Learn about the latest developments and product plans. + +For more information on the company, access to software and tools, and information on the community and open engineering, visit www.linaro.org diff --git a/src/content/blogs/linaro-updates-schedule-list-keynote-speakers-linaro-connect-las-vegas-2016.mdx b/src/content/blogs/linaro-updates-schedule-list-keynote-speakers-linaro-connect-las-vegas-2016.mdx new file mode 100644 index 0000000..6140613 --- /dev/null +++ b/src/content/blogs/linaro-updates-schedule-list-keynote-speakers-linaro-connect-las-vegas-2016.mdx @@ -0,0 +1,41 @@ +--- +title: Linaro Updates Schedule and List of Keynote Speakers for Linaro Connect + Las Vegas 2016 +description: "Cambridge, UK: 1 September 2016" +image: linaro-website/images/blog/48784720458_63040ac998_k +tags: + - linaro-connect + - linux-kernel + - open-source +author: linaro +date: 2016-09-01T19:13:11.000Z +link: /news/linaro-updates-schedule-list-keynote-speakers-linaro-connect-las-vegas-2016/ +related: [] + +--- + +Cambridge, UK: 1 September 2016 + +[Linaro](/), the not-for-profit engineering organization developing open source software for the Arm architecture, today updated the schedule and full list of keynote speakers for the upcoming [Linaro Connect Las Vegas 2016](https://resources.linaro.org/en/tags/42541423-4061-409a-9ec6-ea2184ffe68c) (LAS16) that will take place September 26-30th. Join the over 400 already registered engineers to discuss the latest Arm open source software engineering by [registering](https://www.eventbrite.co.uk/e/linaro-connect-las-vegas-2016-las16-tickets-21812925046?mc_cid=885b42f55f\&mc_eid=7fd0e8f93e) today. The event will feature several keynote speakers covering topics including open source benefits for IoT and embedded applications, open source robotics, the challenges of open source in a corporate environment, the importance of end to end security and community 3.0. 
Linaro’s Chief Executive Officer, George Grey, will kick-off the event with announcements about Linaro’s new development directions and demonstrations of ongoing engineering work. + +The keynote speakers currently scheduled for Linaro Connect LAS16 are: + +* George Grey, **Linaro** CEO, on Monday 26th September +* Morgan Quigley, **Open Source Robotics Foundation** Chief Architect, on Monday 26th September +* Sarah Sharp, **Otter Tech** Founder, on Tuesday 27th September +* Geoff Thorpe, **NXP** Head of Security Center of Excellence, on Wednesday 28th September +* Brian Richardson, **Intel** Senior Technical Marketing Engineer, on Thursday 29th September +* Jono Bacon, **Jono Bacon Consulting** Founder, on Friday 30th September + +The keynotes are followed by discussion sessions, presentations and lots of engineering hacking. The agenda covers a broad range of open source topics, with a special focus on the Monday on IoT and Embedded, Tuesday on Mobile, Wednesday on Home and Servers, Thursday on Networking and Friday on Community. In addition, Linaro Connect will be hosting three mini conferences after the keynotes on the Tuesday, Wednesday and Thursday. All attendees are welcome to join. The topics for the conferences are: + +* Firmware on Tuesday 27th September +* Cortex-M Software on Wednesday 28th September +* AOSP on Thursday 29th September + +The theme for LAS16 is *Engineers and Devices Working Together.*  Linaro’s leadership in the Arm ecosystem relies on engineers from many different companies working together with the latest technology to develop and optimize software. The collaborative engineering process is key to Linaro’s success and is what makes being part of Linaro so unique. LAS16 will be all about the engineers, the process involved in building and testing software, and enjoying working with technology. Linaro Connect LAS16 will be held at the beautiful JW Marriott Hotel in Las Vegas, Nevada. + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [](/) and [http://www.96Boards.org](https://www.96boards.org/). 
diff --git a/src/content/blogs/meltdown-spectre.mdx b/src/content/blogs/meltdown-spectre.mdx new file mode 100644 index 0000000..4571edc --- /dev/null +++ b/src/content/blogs/meltdown-spectre.mdx @@ -0,0 +1,211 @@ +--- +keywords: Meltdown, Spectre, Arm, OP-TEE, Trustzone, Speculative execution, + branch predictor, CPU cache, Set-Associative-Cache, side channel attack, + Simple Power Analysis, Differential Power Analysis, crypto, +title: Implications of Meltdown and Spectre : Part 1 +description: In this article, Joakim Bech looks at the implications of meltdown + & spectre in practice and how it could affect secure domains like TrustZone. + Read more here! +image: linaro-website/images/blog/meltdown-spectre-logo +tags: + - arm +author: joakim-bech +published: true +date: 2018-01-24T11:00:00.000Z +related: [] + +--- + +
+# Implications of Meltdown and Spectre + +By now everyone has heard about [Meltdown](https://meltdownattack.com/) and [Spectre](https://spectreattack.com/), but let us try to discuss what it really means in practice and also how it could potentially affect secure domains like [TrustZone](https://www.arm.com/products/security-on-arm/trustzone). The basis of the Meltdown and Spectre attacks is to make use of speculative execution, out-of-order execution, branch predictors and caches, all features found in modern CPUs. These features all aim to increase performance and to avoid latencies in the pipeline. They work in conjunction with each other in quite complex ways. Let us go over them one by one to get a better understanding of the essence of Meltdown and Spectre. + +
+
+ +
+ +

+ +# Speculative execution + +In short, [*speculative execution*](https://en.wikipedia.org/wiki/Speculative_execution) is about doing work that might be needed in the future. If that execution turns out not to be needed in practice, then we just throw the results away. As an analogy, you can compare this with how you as a programmer write code. You are, for example, using Git to make "snapshots" at various points (*git commit*, *git checkout -b foo* etc). So let's say, for example, that you have a stable setup, but you want to explore something new. What do you do? You create another branch and start working with your new idea. If it turns out to be something good, then you can just commit the changes directly to the stable branch, but if it turned out to be a bad idea, then you can just throw away the work. The penalty you have to pay when throwing it away is that you have spent time on doing work that turned out to be unnecessary. Although a trivial example, it describes the basic ideas behind speculative execution. In a computer, the CPU does similar things. It fetches instructions and data, where some instructions might take more time than others, so instead of sitting and waiting for those instructions to complete, the CPU will continue doing some work by executing pending instructions. What that means is that code in branches that end up not being taken may still execute due to the speculative execution done by the CPU. + +![Speculative execution](/linaro-website/images/blog/speculative-execution) + +Set aside for a moment the fact that, in a real scenario, the compiler would probably remove `buffer[pos]` in the example above. With that in mind, and albeit simplified, it serves as a good illustration of what could happen due to speculative execution. Here it could be that `buffer[pos]` is being speculatively executed even though `pos` is greater than `limit`. In practice this would lead to an "out-of-bounds" access by the speculative execution. This is the key thing in one of the Spectre attacks that we will discuss in more detail further down. + +*** + +# Out-of-order execution + +Another CPU feature intended to gain performance is the so-called [*out-of-order execution*](https://en.wikipedia.org/wiki/Out-of-order_execution), which basically means that the CPU can re-order micro operations, so they run in parallel or sometimes even before the preceding instructions. So instead of running all instructions in a strict sequential order, it will run them as soon as the required resources are available. + +![Out-of-order execution](/linaro-website/images/blog/out-of-order-execution) + +As an example, in the code above it could be that the MOV instruction takes several cycles to complete, and therefore, instead of just stalling the pipeline, the CPU will continue and execute the ADD and the SUB instructions, since there are no dependencies on the MOV instruction (the registers R1 and R2 are not used by the instructions on lines 2 and 3). + +*** + +# Branch predictor + +The next CPU feature to mention is the so-called [*branch predictor*](https://en.wikipedia.org/wiki/Branch_predictor). The reason for having a branch predictor is to guess whether a branch will be taken or not. This goes hand in hand with speculative execution, since the speculative execution will execute instructions based on where the branch predictor believes the execution will continue.
If it turns out that the branch predictor is wrong, then we have the situation where we have done some extra unnecessary work that needs to be thrown away. As one could imagine, a wrong guess by the branch predictor will introduce some extra delay, but without the branch predictor the CPU would just have been sitting idle and waiting instead of doing useful work. + +![Branch Prediction For Loop](/linaro-website/images/blog/branch-prediction-for-loop) + +A simple example would be a while loop. Let's say it executes 100 times until the condition changes; the branch prediction hardware would predict that the branch will be taken until suddenly it is not taken. This means that for 100 loops of the code, the CPU correctly predicted the branch. It also means that it predicted the final branch wrongly and had to throw away some work. + +CPU branch prediction hardware can be pretty complicated, since the branch predictor as such consists of special hardware and there are a lot of different branch prediction strategies. It also depends on whether the prediction is for direct branches, conditional branches or [indirect branches](https://en.wikipedia.org/wiki/Indirect_branch), where the latter is about trying to figure out a good target address to branch to. For branch target prediction the CPU keeps a history of the branches taken in the past in a Branch Target Buffer (BTB). You can think of it as an array that maps a PC (Program Counter) value to the last address the code jumped to from that particular PC. In its simplest form, such a Branch Target Buffer could look like this: + +![Branch Target Buffer](/linaro-website/images/blog/branch-target-buffer) + +A thing to notice here is that this type of information is not bound to a particular process or [Exception Level](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0488d/CHDHJIJG.html); instead it is something that is shared across exception levels etc, with "whatever" code is running on the same core. + +*** + +# Caches + +The last CPU feature to mention before looking into Meltdown and Spectre is the [CPU cache](https://en.wikipedia.org/wiki/CPU_cache). There is no need to go into the nitty-gritty details about caches and why we need them, since caches are well-established technology and known to most people. Without caches our systems would be incredibly slow, since it simply takes too much time to fetch information from main memory every time, so in a modern (high-end) system it is a must to use them. More interesting is what ends up in the cache and why. Usually caches make use of the [principle of locality](https://en.wikipedia.org/wiki/Principle_of_locality): data that has just been used will probably be used again soon (temporal locality), and data at nearby addresses will probably be used as well (spatial locality). The cache is also split into *cache sets* and divided into blocks, usually referred to as *cache lines*. Here we are talking about sizes in the order of 64 bytes per cache line or so (could be less, could be more). So what we have on an Arm system with a [*Set-Associative-Cache*](https://en.wikipedia.org/wiki/CPU_cache#Two-way_set_associative_cache) is something like this: + +![Set Associative Cache](/linaro-website/images/blog/set-associative-cache) + +The cache sets are the horizontal rows, the columns are the *ways* and the individual cells are the cache lines.
As depicted in the image (marked in blue), the "*Index*" is used to select a certain cache set. + +*** + +# Side channel attacks + +This category of attack makes use of properties and behaviour occurring outside the code itself that leak information that otherwise should not be observable. There are many different variants of this. Measuring the amount of time it takes to do certain operations is a common [side channel attack](https://en.wikipedia.org/wiki/Side-channel_attack). The textbook example is when you are verifying a password using *memcmp* (which by the way is a very bad idea). The way *memcmp* works is that it compares one byte at a time, and as soon as two bytes do not match *memcmp* will [return to the caller](https://github.com/gcc-mirror/gcc/blob/master/libiberty/memcmp.c#L30-L31). So by measuring the time it takes to do a password comparison in this way, you can see that it will take more time when a correct password is being verified than when an incorrect one is. Knowing this, you could simply try all possible characters (255 values) one by one. The call that takes the most time is probably the one containing the correct character, and then one can advance to the next character; rinse and repeat all the way until the entire password has been recovered. Below is an example of how this could look; here we have used the [*rdtsc*](https://en.wikipedia.org/wiki/Time_Stamp_Counter) instruction (x86) to do the measurements. The correct password is "foo" and as we can see, when all characters are wrong it takes 486 cycles for the operation until *memcmp* returns. But when you have the full (and correct) password, it takes 561 cycles. + +![Password Timing Attack](/linaro-website/images/blog/password-timing-attack) + +\[Source: [joakimbech.com - Timing Attack - Proof of Concept](https://jyx.github.io/timing-attack-proof-of-concept.html)] + +There are many similar techniques, for example Simple Power Analysis ([SPA](https://en.wikipedia.org/wiki/Power_analysis#Simple_power_analysis)) and Differential Power Analysis ([DPA](https://en.wikipedia.org/wiki/Power_analysis#Differential_power_analysis)), both of which measure the amount of power used at a certain point in time. This is a very powerful type of side channel attack that has been able to completely break the security in many devices. As it turns out, the cache can also be the subject of side channel timing attacks, since the cache itself leaks information. If you measure the amount of time it takes to access data, you can figure out whether the data was already in the cache or not. We will look into a couple of techniques for this here. + +*** + +## Prime and Probe + +In this scenario, the attacker starts by making sure that they fill the cache completely with their own data. + +![Prime and Probe](/linaro-website/images/blog/prime-probe-01) + +After this the attacker makes sure that the victim's code runs. This could potentially be a more privileged domain doing some crypto operation or something else using some sensitive information. Since the attacker started out by filling up the cache completely, the CPU needs to replace data in some location in the cache. I.e., it will evict some of the attacker's data from the cache and put the victim's data there instead. What we have been doing so far is the "Prime" part of this attack. The second part, the "Probe" part, takes place when we return back to the attacker's code.
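+
+The essence of the probe step is a timed load. A rough sketch of what such a timed access could look like is shown below (illustrative only and not from the original post; a real attack would use a cycle-accurate counter such as *rdtsc* plus serializing instructions rather than `clock_gettime`, and would repeat the measurement to filter out noise):
+
+```c
+#include <stdint.h>
+#include <time.h>
+
+static uint64_t now_ns(void)
+{
+    struct timespec ts;
+    clock_gettime(CLOCK_MONOTONIC, &ts);
+    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
+}
+
+/* Time a single load from the attacker's own buffer: a small result
+ * suggests a cache hit, a large result suggests a cache miss. */
+static uint64_t time_access(const volatile uint8_t *probe)
+{
+    uint64_t t0 = now_ns();
+    (void)*probe;
+    return now_ns() - t0;
+}
+```
+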
What the attacker does is to start accessing their own data again, and by measuring the time it takes to access it, it is possible to determine whether it was a cache hit (fast access) or a cache miss (slow access). + +![Prime and Probe](/linaro-website/images/blog/prime-probe-02) + +The ones of interest here are the cache misses, since they tell the attacker that their data has been replaced with something else, i.e., the victim's data. So in the blue line (the cache set) above, the attacker will get a slow access to that cache line due to a cache miss. + +*** + +## Flush and Reload + +If the attacker has access to memory shared with the victim, they can use a cache side channel attack similar to Prime and Probe. The major difference here is that both the attacker and the victim use shared memory, where the same memory is mapped into the attacker's and the victim's separate virtual address spaces. + +![Flush and Reload](/linaro-website/images/blog/flush-reload) + +This means that data in a certain cache line will be in the cache for both the victim and the attacker. What the attacker does in this case is similar to Prime and Probe, but instead of filling the cache, the attacker flushes the cache line, so the data is not in the cache anymore. Next, the attacker lets the victim run the code dealing with sensitive data, which means that the CPU is going to put some data back into the cache. When the victim has completed running the code, the attacker will try to read the data again, and by measuring the time it takes (cache hit or not) they can once again determine whether the victim accessed the data or not. + +*** + +# Meltdown - Rogue data cache load (CVE-2017-5754) + +We now have enough background to start looking into the recent attacks, so let us start with *Meltdown*. For a very long time the kernel's memory mappings have been present even when running in unprivileged mode. The main reason for this is performance, since with the mapping readily available there is no work to be done when switching context from user space to the kernel. Conceptually this is not a problem, since there are ways to protect user space from actually being able to read the mapped kernel memory. That type of access information (readable, writable, executable and user space accessible) is stored in the [page tables](https://en.wikipedia.org/wiki/Page_table). But as we will see, [researchers](https://meltdownattack.com) recently found a way, due to (undesired) side effects of out-of-order and speculative execution and of how the kernel switches contexts, to completely overcome the memory isolation between privileged and unprivileged domains, and thereby found a way to read kernel memory from a user space process. + +The whitepaper mentions a couple of different ways to do the Meltdown attack. One involves making use of exceptions and another way is to suppress exceptions (and only run instructions speculatively). In the case of the attack involving exceptions, the attack is triggered by a client trying to access kernel memory. What then happens when user space tries to access kernel memory is that we get an exception that immediately [traps](https://en.wikipedia.org/wiki/Trap_\(computing\)) to the kernel. Architecturally, the kernel and the CPU will not allow the user space process to access the memory, and from the user space point of view you will get some kind of error message in return (or the process will simply be terminated).
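+
+A highly simplified sketch of the access pattern being described is shown below (illustrative only; `kernel_addr` and `probe_array` are made-up names, and a real exploit needs considerably more machinery, for example to handle or suppress the fault and to later probe the cache):
+
+```c
+#include <stdint.h>
+
+/* The faulting access and a dependent load. Architecturally the first line
+ * faults, but instructions executed out of order may still touch
+ * probe_array[value * 4096], leaving a footprint in the cache. The stride
+ * just needs to be large enough that each possible byte value maps to its
+ * own cache line. */
+static void meltdown_style_access(const volatile uint8_t *kernel_addr,
+                                  volatile uint8_t *probe_array)
+{
+    uint8_t value = *kernel_addr;        /* raises a fault             */
+    (void)probe_array[value * 4096];     /* may still run out of order */
+}
+```
+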
However, between the exception being handled and the return back to the client in user space, some instructions might still be executed due to out-of-order execution (unseen outside the CPU), and those could very well be instructions that access kernel memory; because of this, kernel memory will be put into the cache. + +When the exception is being handled, just before returning to user space, the CPU will discard the registers, memory etc in use by the out-of-order execution. But what is not discarded is the "out-of-order" memory that was put in the cache. That is still there and that is what is used in the Meltdown attack. From user space one can run attacks like Flush and Reload or Prime and Probe, as described earlier, to get the leaked information. + +What does all this really mean in practice? What are the practical attacks and do we have to worry about them? Let us for example say that you are using disk encryption. To decrypt (or encrypt) files, a key needs to be present somewhere. If such a key is present in kernel memory, then due to Meltdown it is possible to get access to this key from user space. Since many systems are multiuser systems this is a serious problem. Think about environments where computers are shared by students at universities, or think about companies hosting cloud services where their customers are running in containers like Docker, LXC etc. In most cases, all users are supposed to be running in their own "sandbox" without being able to access data from other users. With Meltdown it is possible to get out of the sandbox and also escape containers. + +The mitigation being proposed for this is called KAISER. In short, instead of having the entire kernel mapped when running in user space, you should only have the necessary kernel functionality mapped (interrupt handlers etc). In Linux this feature is called [KPTI](https://en.wikipedia.org/wiki/Kernel_page-table_isolation), i.e., Kernel Page Table Isolation, and it has been implemented for both x86 and Arm. It should be noted that this is not something unique to Linux. Most other OSes (Windows, macOS, Android etc) have used similar mapping techniques and even secure OSes (TrustZone) are doing this. So the KAISER idea needs to be applied in lots of places. + +Another thing to note regarding Meltdown is that not all processor architectures are susceptible to this attack. Intel seems to have a hard time here, since many of their processor architectures are affected. Arm, on the other hand, has [listed](https://developer.arm.com/support/security-update) only a few processor architectures as being affected by this (mainly Cortex-A75), which means that there are not really many Arm devices out there yet that are exposed to this attack. + +*** + +## Meltdown patches and performance issues? + +In the news there have been lots of reports saying that patching Meltdown gives a significant performance drop ([Arstechnica](https://arstechnica.com) has a good [summary](https://arstechnica.com/gadgets/2018/01/heres-how-and-why-the-spectre-and-meltdown-patches-will-hurt-performance/)). This is true in general, but how big a performance hit you will get depends on the use case. If we think about the mitigation for a minute, what are the implications of not having the kernel mapped in user space any longer?
For user space itself it does not really matter as such, but as soon as user space needs to access some (privileged) service running in the kernel, it will do a [syscall](https://en.wikipedia.org/wiki/System_call). When doing the syscall we jump from running code in user space to running code in the kernel, and for the kernel to be able to run its code, it needs to put the kernel memory mappings back in place again. This takes time, and previously we did not have to do this. So, in short, a syscall has become more expensive now compared to the past. If you have an I/O-heavy use case, then you will tend to do lots of syscalls, and therefore these types of use cases will see a greater performance hit than others. The worst case for performance is probably pure test cases that mainly exercise syscalls, and that is probably where we are seeing figures of up to a 50% decrease in performance. Conversely, there are use cases that do not use that many syscalls, and in those cases the performance hit will be negligible. + +*** + +# Spectre + +Spectre is related to Meltdown in the sense that the two Spectre attacks both make use of the same side effects as Meltdown does, i.e., that data from code being speculatively executed will end up in the cache. The major difference here is that the attacks are not limited to only accessing kernel memory from user space. With the Spectre attacks, if successfully exploited, you can also access memory from other processes running at the same privilege level, or even get access to memory from any other domain running on the system. So what do we mean by that? We shouldn't forget that in addition to user space and the kernel, we have more privilege levels; for example, in an Armv8-A system we also have [Hypervisor](https://en.wikipedia.org/wiki/Hypervisor) mode and the Secure Monitor, and on the secure side (TrustZone) we also have user space (Trusted Applications) and a kernel (the secure OS). This means that Spectre attacks work across boundaries regardless of privilege level and secure state. + +There are many ways an attacker could make use of the Spectre attacks. One that has been mentioned affects browsers. Let us for example say that you visit a website that runs some malicious "Spectre JavaScript code" in one of your tabs in the web browser; that piece of code could get access to sensitive information in the other tabs in the browser. In principle, the malicious piece of code could get access to passwords, cookies and other data that is in the web browser's memory. Let us now have a look at the two attacks individually. + +*** + +## Variant 1: Bounds check bypass (CVE-2017-5753) + +The core of this attack is that an attacker sends an out-of-bounds value used to index some array which under normal circumstances would be OK to access (if the parameter were within the limits). In the Spectre [whitepaper](https://spectreattack.com/spectre.pdf) they listed the code below. + +![Spectre Example](/linaro-website/images/blog/spectre-v1-example) + +\[Source: [https://spectreattack.com/spectre.pdf](https://spectreattack.com/spectre.pdf)] + +Here `x` is the user-provided data and `array1_size` is the full length of `array1`. By just looking at the code everything seems fine. I.e., if `x` is too big, then nothing will happen. But if `array1_size` is not cached, then it will take some time before the CPU knows whether this branch will be taken or not, since it will need to bring in `array1_size` from main memory.
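+
+For reference, the pattern in that listing is essentially the following (reproduced here as plain text, using the multiplier of 256 that this post uses in its explanation; see the whitepaper for the exact listing):
+
+```c
+#include <stddef.h>
+#include <stdint.h>
+
+uint8_t array1[16];
+uint8_t array2[256 * 256];
+unsigned int array1_size = 16;
+uint8_t y;
+
+void victim_function(size_t x)
+{
+    if (x < array1_size)               /* the bounds check                   */
+        y = array2[array1[x] * 256];   /* may be executed speculatively even
+                                          when x is out of bounds            */
+}
+```
+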
While waiting for that, the CPU will under some conditions speculatively execute the line inside the `if` statement even if `x` is an out-of-bounds value. + +So what happens at the second line if `array1` is an array of bytes? What are the possible values we could get by reading `array1[x]`? Simple: we get a byte value in the range \[0-255] and it cannot be anything else. This is the value that we are looking for in the attack, but since we cannot simply read it from the cache, there is some more work to be done, and that is where `array2` and the multiplication by 256 come into play. By doing this multiplication we will load data somewhere in `array2` at position \[0 \* 256, 1 \* 256, 2 \* 256, 3 \* 256, … , 255 \* 256], and now it is time for the Eureka moment! The value `y`, or whatever `array2[]` evaluates to, is not of any interest in the attack. What is of interest is what is being updated in the cache. + +Let us say for example that `array1` and the two adjacent out-of-bounds bytes in memory look like this: + +![Spectre v1 Memory](/linaro-website/images/blog/spectre-v1-memory) + +If a rogue user sends an out-of-bounds value of `x` so that the speculative execution evaluates `array1[x]` to 0x66 (i.e., the last value listed in the image above), then the access to `array2` will read memory at location `array2[0x66 * 256]` and the cache line corresponding to this address will be updated accordingly. Do you remember the Prime and Probe attack? I.e., suppose that we filled the entire cache before running this piece of code. Noise aside, what would happen to the cache when returning from the code in this example? The cache line corresponding to byte 0x66 would have been evicted and replaced with this new value. + +Upon return, the attacker would simply loop over all possible cache sets / cache lines, and the one that has a cache miss would probably be the cache line corresponding to the value \[0x66 \* 256] (it is possible to come up with such a mapping). I.e., the attacker has figured out that the value of byte `array1_size + 2` is 0x66. By controlling the speculative execution and by submitting different out-of-bounds values, it is possible for the attacker to read any memory. This explanation is a bit simplified, but it explains the essence of this attack. In reality one needs to do some more work to be able to figure out which address the value maps to etc. For readers interested in a real example, there is a Spectre example implementation for x86 in the Spectre [whitepaper](https://spectreattack.com/spectre.pdf) (appendix A, listing 4). + +The mitigation here is not as straightforward as for Meltdown, and many processor architectures are susceptible to this attack (Intel, Arm, AMD etc). What developers need to do here is manually inspect the code to see if there are memory access patterns similar to the example we described here. This can be tricky and error-prone, and it is something that needs to be done again over time. Toolchains (GCC \[[1](https://gcc.gnu.org/ml/gcc-patches/2018-01/msg00205.html)], \[[2](https://gcc.gnu.org/ml/gcc-patches/2018-01/msg00211.html)], LLVM \[[3](https://reviews.llvm.org/D41760)], \[[4](https://reviews.llvm.org/D41761)] etc) will get patches with new [intrinsics](https://lwn.net/Articles/740157/) that can assist developers when trying to locate vulnerable code sections. + +This attack is thought to be hard to put into practice.
Google engineers have been able to [demonstrate](https://googleprojectzero.blogspot.se/2018/01/reading-privileged-memory-with-side.html) it by leveraging the [eBPF](https://opensource.com/article/17/9/intro-ebpf) (extended Berkeley Packet Filter) bytecode interpreter and JIT engine in the Linux kernel, but other than that we are unaware of other successful attacks. + +*** + +## Variant 2: Branch target injection (CVE-2017-5715) + +In the second variant of Spectre, the attacker tricks the branch predictor into either taking or not taking branches and thereby influences what code will be speculatively executed. This can, for example, be done by filling the Branch Target Buffer (described in the Branch Predictor section) when running user space code; later, when more privileged code runs on the same core, the speculative execution will take place at the indirect branches as directed by the BTB. The thing to pay attention to here is that branch prediction and speculation are not filtered by the exception level that the processor was in. + +To be able to exploit this, the attacker must find gadgets that can be used as a trampoline to run code in a way that makes this exploit possible. A gadget is a set of instructions that can be chained together with other gadgets, making it possible for an attacker to run arbitrary instructions on a machine. Readers familiar with [ROP](https://en.wikipedia.org/wiki/Return-oriented_programming) (Return Oriented Programming) have probably heard about gadgets in the past. + +The rest of the attack is the same as for the first variant of Spectre, i.e., data ends up in the cache and can later leak memory via a covert channel. Of all three attacks being discussed, this is the one that is thought to be hardest to put into practice. There are many pieces in a big puzzle that need to match, so an attacker would need to know a lot about the target itself and the code running between their "attacker" code and the target. + +The main mitigation technique that has been discussed here is to invalidate the branch predictor when moving across different privilege levels (and secure state). Most processor architectures have instructions for doing this and some may need [special treatment](https://github.com/Arm-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715). However, Google has proposed another technique that they call [retpoline](https://support.google.com/faqs/answer/7625886). It appears to be something that can be used on multiple architectures (Intel, Arm etc), and this technique seems to be cheaper in terms of performance hit compared to invalidating the branch predictor. + +*** + +# Is TrustZone affected? + +Conceptually TrustZone works in a similar way to a normal OS, and by that we mean that you also have different privilege levels when running code on the secure side. In a TrustZone solution we are running *Trusted Applications* in secure user space and we are running the "TrustZone kernel" in a privileged kernel mode. This is often depicted as in the image below. + +![Trustzone Matrix](/linaro-website/images/blog/trustzone-matrix) + +Because they work in a similar way to a traditional OS, secure OSes are also susceptible to the attacks we have described here. We will look more into TrustZone in the upcoming blog post, where we will talk about how this affects OP-TEE and what has been done in OP-TEE to prevent the exploits from being used.
+ +*** + +# Summary + +This is the first part in a series of blog posts about Meltdown and Spectre. The intention here was to penetrate the whitepapers and give an easy to grasp overview of the attacks. In the upcoming blog post we will talk more about individual components, like OP-TEE, Linux kernel and other firmware. + +*** + +# References + +* Arm processor security update: + [https://developer.arm.com/support/security-update](https://developer.arm.com/support/security-update) + +* Spectre and Meltdown + [https://spectreattack.com](https://spectreattack.com) + [https://meltdownattack.com](https://meltdownattack.com) + +* Google Project Zero + [https://googleprojectzero.blogspot.se/2018/01/reading-privileged-memory-with-side.html](https://googleprojectzero.blogspot.se/2018/01/reading-privileged-memory-with-side.html) + +* OP-TEE Mailing list [http://eepurl.com/cSqzDf](http://eepurl.com/cSqzDf) + +* OP-TEE Website [https://www.op-tee.org/](https://www.op-tee.org/) diff --git a/src/content/blogs/networking-leaders-collaborate-to-maximize-choice-performance-and-power-efficiency.mdx b/src/content/blogs/networking-leaders-collaborate-to-maximize-choice-performance-and-power-efficiency.mdx new file mode 100644 index 0000000..52580f5 --- /dev/null +++ b/src/content/blogs/networking-leaders-collaborate-to-maximize-choice-performance-and-power-efficiency.mdx @@ -0,0 +1,122 @@ +--- +excerpt: Industry leaders including AppliedMicro, Arm, Enea, Freescale®, LSI, + MontaVista, Nokia Siemens Networks and Texas Instruments (TI) have formed a + new group focused on accelerating Linux development for Arm processors in + cloud and mobile infrastructure. +title: Networking Leaders Collaborate to Maximize Choice, Performance and Power + Efficiency +description: Industry leaders including AppliedMicro, Arm, Enea, Freescale®, + LSI, MontaVista, Nokia Siemens Networks and Texas Instruments (TI) have formed + a new group focused on accelerating Linux development for Arm processors in + cloud and mobile infrastructure. +image: linaro-website/images/blog/IMAGE_HOMEPAGE +author: linaro +date: 2013-02-20T12:24:07.000Z +link: /news/networking-leaders-collaborate-to-maximize-choice-performance-and-power-efficiency/ +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 20 FEB 2013 + +## Industry leaders including AppliedMicro, Arm, Enea, Freescale®, LSI, MontaVista, Nokia Siemens Networks and Texas Instruments (TI) have formed a new group focused on accelerating Linux development for Arm processors in cloud and mobile infrastructure. + +Linaro, the not-for-profit engineering organization developing open source software for the Arm® architecture, today announced the formation of the Linaro Networking Group (LNG) with twelve founding member companies including AppliedMicro, Arm, Enea, Freescale, LSI, MontaVista, Nokia Siemens Networks and Texas Instruments (TI) at the Embedded Linux Conference (ELC). + +With Arm-based SoCs at the heart of the transformation occurring in cloud and mobile infrastructure applications such as switching, routing, base-stations and security, Linaro’s members are collaborating on fundamental software platforms to enable rapid deployment of new services across a range of converged infrastructure platforms. Developing the base platform for diverse and complex networking applications requires a significant amount of software that addresses common challenges. LNG will deliver this as an enhanced core Linux platform for networking equipment. 
Linaro has been providing common core software for Arm-Powered®, Linux-based mobile devices since June 2010 with recognized success, and it is now building on the collaborative working model that it has created to form special groups focusing on the server and networking segments. + +Networking infrastructure is undergoing a transformation driven by the ramp in diverse data being moved through disparate networks to and from billions of diverse devices. The industry needs to simplify the management of the network as well as create new applications that will enable cloud service providers, carriers and others to reliably provide a great user experience across expanded mobility use cases and the increasing globally-connected intelligence of devices. Enterprises need to scale their networks and their network management capabilities to cope with these demands and also enable the rapid evolution of applications for new revenue-generating business models. LNG will accelerate this transformation through its initial focus on fundamental optimizations for use across all Arm-based networking infrastructure equipment. + +“The strength of the Arm community is in working together and innovating,” said George Grey, CEO of Linaro. “We are very pleased to host the new Linaro Networking Group software engineering team, focused on consolidating and optimizing common software for networking equipment applications using Arm SoCs. We look forward to working with SoC vendors, equipment manufacturers and members of the software ecosystem in building and maintaining world-class open source foundation software for this market.” + +Linaro has a unique business model where multiple companies jointly invest in a software engineering team that creates core open source software in a collaborative and transparent environment. The effectiveness of Linaro’s approach has been demonstrated by Linaro becoming one of the largest company contributors to recent Linux kernels\*. Linaro’s contribution to improving Arm’s support in the open source Linux community has also been recognized by Linus Torvalds\*\*. + +“Linux and collaborative development are the drivers for innovation supporting new data and networking demands. The Linaro Networking Group will help advance important work in this area,” said Jim Zemlin, executive director at The Linux Foundation. “We look forward to ongoing collaboration with Linaro, including its participation at this year’s Embedded Linux Conference.” + +An interim steering committee for LNG has been meeting since the end of 2012 and has agreed on four initial areas of work: + +1. Virtualization support with considerations for real-time performance, I/O optimization, robustness and heterogeneous operating environments on multi-core SoCs. +2. Real-time operations and the Linux kernel optimizations for the control and data plane. +3. Packet processing optimizations that maximize performance and minimize latency in data flows through the network. +4. Dealing with legacy software and mixed-endian issues prevalent in the networking space. + +Linaro expects initial software deliveries from the Linaro Networking Group during the first half of 2013 with on-going monthly releases thereafter. + +*\*Sources: Statistics from the 3.7 development cycle Jonathan Corbet, LWN, 28 November 2012: (subscription required), Who wrote 3.5? 
Greg Kroah-Hartman, LWN, 25 July 2012:[ https://lwn.net/Articles/507986/](https://lwn.net/Articles/507986/) (subscription required) and earlier LWN articles.* + +\_Source: Torvalds touts Linux’s advances in power, Arm and cell phones Paula Rooney, ZDNet, 30 August 2012 + +**About Linaro** + +Linaro is the place where engineers from the world’s leading technology companies define the future of Linux on Arm. The company is a not-for-profit engineering organization with over 140 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone, and to reduce non-differentiating and costly low level fragmentation. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit [.](./) + +**Linaro Networking Group (LNG) Founding Member Testimonials** + +*** + +**Applied Micro Circuits Corporation** + +“With AppliedMicro's rich history in high speed connectivity, we quickly grasped the potential the Linaro Networking Group offers our customers and the industry at large,” said Mike Major, vice president of corporate marketing. “As the producer of the world's first Arm 64-bit server on a chip and a founding member of the Linaro Enterprise Group, we well understand the promise of open hardware and software, and we look forward to working with the Linaro Networking Group.” + +*About AppliedMicro:* Applied Micro Circuits Corporation is a global leader in computing and connectivity solutions for next-generation cloud infrastructure and data centers. AppliedMicro delivers silicon solutions that dramatically lower total cost of ownership. Corporate headquarters are located in Sunnyvale, California. See [www.apm.com](http://www.apm.com/) for more information. + +Media contact: Diane Orr, diane@orr-co.com. + +*** + +**Arm** + +“The creation of the Linaro Networking Group reflects the opportunity for Arm architecture-based platforms, across cloud and mobile infrastructure, to meet the demand for energy efficient, high performance and scalable networks. This dramatic shift is being driven by the expansion of connected devices, related analytics and applications, and the introduction of new services,” said Ian Drew, executive vice president, Marketing, Arm. “The Linaro Networking Group enables networking infrastructure stakeholders to accelerate their next generation deployment of reliable, scalable and secure high performance networks, with optimized open source software platforms.” + +*About Arm:* Arm designs the technology that lies at the heart of advanced digital products, from wireless, networking and consumer entertainment solutions to imaging, automotive, security and storage devices. Arm’s comprehensive product offering includes 32-bit RISC microprocessors, graphics processors, video engines, enabling software, cell libraries, embedded memories, high-speed connectivity products, peripherals and development tools. Combined with comprehensive design services, training, support and maintenance, and the company’s broad Partner community, they provide a total system solution that offers a fast, reliable path to market for leading electronics companies. See [www.arm.com](http://www.arm.com/) for more information. 
Media contacts: Raymond Deplazes, armus@racepointgroup.com; Simon Hilliard, armuk@racepointgroup.com. + +*** + +**ENEA** + +“Enea has provided software for Arm based architectures since the nineties. Our membership in the Linaro Networking Group (LNG) reflects our commitment to Linux and to Arm in the networking space. It is an important strategic step towards our goal to become the leading embedded Linux provider for Arm based systems”, says Tobias Lindquist, CTO, Enea. “As an independent Linux provider, Enea is in a strong position to focus on the needs of network equipment manufacturers and on software solutions that are hardware vendor agnostic”. + +*About Enea:* Enea is a global vendor of Linux and Real-time operating system solutions including middleware, tools, protocols and services. The company is a world leader in developing software platforms for communication-driven products in multiple verticals, with extreme demands on high-availability and performance. For more information, please visit: [www.enea.com](https://www.enea.com/). Media contact: Catharina Paulcén, catharina.paulcen@enea.com. + +*** + +**Freescale Semiconductor** + +“With strong market share leadership in communications processing and a deep understanding of both Linux and Arm technologies, Freescale is well positioned to play a key role in accelerating Linux development for the Arm architecture,” said Tareq Bustami, vice president of product management for Freescale’s Digital Networking group. “Serving as a founding member of the Linaro Networking Group is an extension of our work within open source and a new focus for our relationship with Linaro. We look forward to working together with other leading players within the group to advance Linux technology for our customers and the larger industry.” + +*About Freescale:* Freescale Semiconductor is a global leader in embedded processing solutions, providing industry-leading products that are advancing the automotive, consumer, industrial and networking markets. From microprocessors and microcontrollers to sensors, analog integrated circuits and connectivity – our technologies are the foundation for the innovations that make our world greener, safer, healthier and more connected. Some of our key applications and end-markets include automotive safety, hybrid and all-electric vehicles, next generation wireless infrastructure, smart energy management, portable medical devices, consumer appliances and smart mobile devices. The company is based in Austin, Texas, and has design, research and development, manufacturing and sales operations around the world. See[ www.freescale.com](https://www.nxp.com/) for more information. + +*** + +**LSI** + +“LSI’s industry leading Axxia Multicore processor platform based on Arm CPU’s is designed to intelligently handle the explosive growth of traffic in mobile and enterprise environments. LSI’s collaboration with the Linaro Networking Group and its open source Linux ecosystem initiatives will further drive system deployment and reduce total ownership costs for service providers”, said Jon Devlin, Director, Networking Ecosystem, LSI. + +*About LSI:* LSI Corporation designs semiconductors and software that accelerate storage and networking in datacenters, mobile networks and client computing. Our technology is the intelligence critical to enhanced application performance, and is applied in solutions created in collaboration with our partners. More information is available at [www.lsi.com](http://www.lsi.com/). 
+ +*** + +**MontaVista** + +“MontaVista is the only provider of Carrier Grade Linux on Arm processors today. We are extremely excited about enabling this new and disruptive architecture into the Network Infrastructure and Telecommunication markets,” said Patrick MacCartee, Director of Marketing for MontaVista Software. “The Linaro Networking Group will play a crucial role by ensuring that the industry has a robust and rich software ecosystem necessary to support the next generation of Arm-based telecom and software defined network solutions.” + +*About MontaVista:* MontaVista Software, LLC, a wholly owned subsidiary of Cavium, Inc. (NASDAQ: CAVM), is a leader in embedded Linux commercialization. For over 10 years, MontaVista has been helping embedded developers get the most out of open source by adding commercial quality, integration, hardware enablement, expert support, and the resources of the MontaVista development community. Because MontaVista customers enjoy faster time to market, more competitive device functionality, and lower total cost, more devices have been deployed with MontaVista than with any other Linux. To learn more, please visit http://www.mvista.com/. + +*** + +**Nokia Siemens Networks** + +“Embedded networking software has been very fragmented due to point optimized hardware implementations. As a founding member of Linaro Networking Group, Nokia Siemens Networks is committed to collaborating in the creation of the Linux ecosystem for Arm that enables easier adoption of new technology while leaving room for innovative SoC designs”, says Jarmo Hillo, Head of Processor Technology at Nokia Siemens Networks. + +*About Nokia Siemens Networks:* Nokia Siemens Networks is the world’s specialist in mobile broadband. From the first ever call on GSM, to the first call on LTE, we operate at the forefront of each generation of mobile technology. Our global experts invent the new capabilities our customers need in their networks. We provide the world’s most efficient mobile networks, the intelligence to maximize the value of those networks, and the services to make it all work seamlessly. + +With headquarters in Espoo, Finland, Nokia Siemens Networks operates in over 100 countries and had net sales of approximately 13.8 billion euros in 2012. More information is available at [www.nokiasiemensnetworks.com](http://www.nokiasiemensnetworks.com/). Media contact: Riitta Mård, riitta.mard@nsn.com. + +*** + +**Texas Instruments (TI)** + +*About TI:* Texas Instruments semiconductor innovations help 90,000 customers unlock the possibilities of the world as it could be – smarter, safer, greener, healthier and more fun. Our commitment to building a better future is ingrained in everything we do – from the responsible manufacturing of our semiconductors, to caring for our employees, to giving back inside our communities. This is just the beginning of our story. Learn more at [www.ti.com](http://www.ti.com/). diff --git a/src/content/blogs/on-the-performance-of-arm-virtualization.mdx b/src/content/blogs/on-the-performance-of-arm-virtualization.mdx new file mode 100644 index 0000000..8d92c30 --- /dev/null +++ b/src/content/blogs/on-the-performance-of-arm-virtualization.mdx @@ -0,0 +1,88 @@ +--- +excerpt: "The first study of Arm virtualization performance on server hardware, + including multi-core measurements of two popular Arm and x86 hypervisors, KVM + and Xen. 
We show how Arm hardware support for virtualization can enable much + faster transitions between VMs and the hypervisor, a key hypervisor operation. + " +title: On the Performance of Arm Virtualization +description: In this article, Christoffer Dall takes a detailed look at the + Performance of Arm Virtualization. Read about his findings here! +image: linaro-website/images/blog/Code_Image_Core_tech +author: christoffer-dall +date: 2016-06-16T17:37:46.000Z +tags: + - arm + - linux-kernel + - virtualization +link: /blog/core-dump/on-the-performance-of-arm-virtualization/ +related: [] + +--- + +![lightbox\_disabled=True Core Dump Banner url=https://wiki-archive.linaro.org/CoreDevelopment](/linaro-website/images/blog/core-dump) + +## Abstract + +Arm servers are becoming increasingly common, making server technologies such as virtualization for Arm of growing importance. We present the first study(1) of Arm virtualization performance on server hardware, including multi- core measurements of two popular Arm and x86 hypervisors, KVM and Xen. We show how Arm hardware support for virtualization can enable much faster transitions between VMs and the hypervisor, a key hypervisor operation. However, current hypervisor designs, including both Type 1 hypervisors such as Xen and Type 2 hypervisors such as KVM, are not able to fully leverage this performance benefit for real application workloads. We discuss the reasons why and show that other factors related to hypervisor software design and implementation have a larger role in overall performance. Based on our measurements, we discuss changes to Arm’s hardware virtualization support that can potentially bridge the gap to bring its faster VM-to-hypervisor transition mechanism to modern Type 2 hypervisors running real applications. These changes have been incorporated into the latest Arm architecture. + +## Introduction + +Despite the importance of Arm virtualization, little is known in practice regarding how well virtualized systems perform using Arm. There are no detailed studies of Arm virtualization performance on server hardware. Although KVM and Xen both have Arm and x86 virtualization solutions, there are substantial differences between their Arm and x86 approaches because of key architectural differences between the underlying Arm and x86 hardware virtualization mechanisms. It is unclear whether these differences have a material impact, positive or negative, on performance. The lack of clear performance data limits the ability of hardware and software architects to build efficient Arm virtualization solutions, and limits the ability of companies to evaluate how best to deploy Arm virtualization solutions to meet their infrastructure needs. The increasing demand for Arm-based solutions and growing investments in Arm server infrastructure makes this problem one of key importance. + +Linaro, in collaboration with Columbia University, present the first in-depth study of Arm virtualization performance on multi-core server hardware. We measure the performance of the two most popular Arm hypervisors, KVM and Xen, and compare them with their respective x86 counterparts. These hypervisors are important and useful to compare on Arm given their popularity and their different design choices. Xen is a standalone bare-metal hypervisor, commonly referred to as a Type 1 hypervisor. KVM is a hosted hypervisor integrated within an existing OS kernel, commonly referred to as a Type 2 hypervisor. 
+ +The detailed results of this study are to appear in the 43rd International Symposium on Computer Architecture (ISCA), a first-tier academic conference for computer architecture. + +## Background + +![Hypervisor designs](/linaro-website/images/blog/Hypervisor-designs) + +Figure 1 depicts the two main hypervisor designs, Type 1 and Type 2. Type 1 hypervisors, like Xen, comprise a separate hypervisor software component, which runs directly on the hardware and provides a virtual machine abstraction to VMs running on top of the hypervisor. Type 2 hypervisors, like KVM, run an existing OS on the hardware and run both VMs and applications on top of the OS. Type 2 hypervisors typically modify the existing OS to facilitate running of VMs, either by integrating the Virtual Machine Monitor (VMM) into the existing OS source code base, or by installing the VMM as a driver into the OS. KVM integrates directly with Linux where other solutions such as VMware Workstation use a loadable driver in the existing OS kernel to monitor virtual machines. The OS integrated with a Type 2 hypervisor is commonly referred to as the host OS, as opposed to the guest OS which runs in a VM. + +One advantage of Type 2 hypervisors over Type 1 hypervisors is the reuse of existing OS code, specifically device drivers for a wide range of available hardware. This is especially true for server systems with PCI where any commercially available PCI adapter can be used. Traditionally, a Type 1 hypervisor suffers from having to re-implement device drivers for all supported hardware. However, Xen, a Type 1 hypervisor, avoids this by only implementing a minimal amount of hardware support directly in the hypervisor and running a special privileged VM, Dom0, which runs an existing OS such as Linux and uses all the existing device drivers for that OS. Xen then uses Dom0 to perform I/O using existing device drivers on behalf of normal VMs, also known as DomUs. + +Transitions from a VM to the hypervisor occur whenever the hypervisor exercises system control, such as processing interrupts or I/O. The hypervisor transitions back to the VM once it has completed its work managing the hardware, letting workloads in VMs continue executing. The cost of such transitions is pure overhead and can add significant latency in communication between the hypervisor and the VM. A primary goal in designing both hypervisor software and hardware support for virtualization is to reduce the frequency and cost of transitions as much as possible. + +## Experimental Design + +To evaluate the performance of Arm virtualization, we ran both microbenchmarks and real application workloads on the most popular hypervisors on Arm server hardware. As a baseline for comparison, we also conducted the same experiments with corresponding x86 hypervisors and server hardware. We leveraged University of Utah’s CloudLab installation of hundreds of Arm 64-bit HP Moonshot m400 nodes and a plethora of x86 servers for our measurements. We compared Arm measurements with Intel Xeon 2.1 GHz ES-2450 CPUs in similar configurations of RAM, disk, network, and more. All network measurements were done with 10G isolated Mellanox networking equipment. + +We designed and ran a number of microbenchmarks to quantify important low-level interactions between the hypervisor and the Arm hardware support for virtualization. 
A primary performance cost in running in a VM is how much time must be spent outside the VM, which is time not spent running the workload in the VM and therefore is virtualization overhead compared to native execution. Therefore, our microbenchmarks are designed to measure time spent handling a trap from the VM to the hypervisor, including time spent on transitioning between the VM and the hypervisor, time spent processing interrupts, time spent switching between VMs, and latency added to I/O. + +To provide comparable measurements, we kept the software environments across all hardware platforms and all hypervisors the same as much as possible. We used the most recent stable versions, available at the time of our experiments, of the most popular hypervisors on Arm and their counterparts on x86: KVM in Linux 4.0-rc4 with QEMU 2.2.0, and Xen 4.5.0. KVM was configured with its standard VHOST networking feature, allowing data handling to occur in the kernel instead of userspace, and with cache=none for its block storage devices. Xen was configured with its in-kernel block and network backend drivers to provide best performance and reflect the most commonly used I/O configuration for Xen deployments. Xen x86 was configured to use HVM domains, except for Dom0 which was only supported as a PV instance. All hosts and VMs used Ubuntu 14.04 with the same Linux 4.0-rc4 kernel and software configuration for all +machines. A few patches were applied to support the various hardware configurations, such as adding support for the APM X-Gene PCI bus for the HP m400 servers. All VMs used paravirtualized I/O, typical of cloud infrastructure deployments such as Amazon EC2, instead of device passthrough, due to the absence of an IOMMU in our test environment. + +We designed a custom Linux kernel driver, which ran in the VM under KVM and Xen, on Arm and x86, and executed the microbenchmarks in the same way across all platforms. Using this framework, we ran seven microbenchmarks that measure various low-level aspects of hypervisor performance. + +## Results + +**KVM Arm Hypercall cost:** **6,500 cycles** +**Xen Arm Hypercall cost:** **376 cycles** + +As an example of our microbenchmarks, we measured the cost of a no-op hypercall, measuring a transition from the VM to the hypervisor and a return to the VM without doing any work in the hypervisor; in other words, the bidirectional base transition cost of hypervisor operations. The Hypercall microbenchmark shows that transitioning from a VM to the hypervisor on Arm can be significantly faster than on x86, as shown by the Xen Arm measurement, which takes less than a third of the cycles that Xen or KVM on x86 take. + +We have analyzed the true reasons for this difference in performance and developed and ran many more micro-level benchmarks, which can be found in the published paper about this work. + +We also ran a number of real application benchmark workloads to quantify how well the Arm virtualization extensions support different hypervisor software designs in the context of more realistic workloads. The benchmarks we ran include a mix of widely-used CPU- and I/O-intensive benchmark workloads. For workloads involving a client and a server, we ran the client on a dedicated machine and the server on the configuration being measured, ensuring that the client was never saturated during any of our experiments. We ran these workloads natively and on both KVM and Xen on both Arm and x86, the latter to provide a baseline comparison.
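+
+As a rough illustration of how such cycle counts can be gathered (this is not the custom kernel driver used in the study, just a sketch): on Armv8 one can read a counter register around the operation of interest, for example the virtual counter shown below. Note that `cntvct_el0` ticks at the generic timer frequency rather than the CPU clock, so cycle-accurate measurements would typically use the PMU cycle counter instead.
+
+```c
+#include <stdint.h>
+
+/* Read the Armv8 virtual counter; the isb keeps the read from being
+ * reordered with the surrounding instructions. */
+static inline uint64_t read_counter(void)
+{
+    uint64_t v;
+    __asm__ __volatile__("isb; mrs %0, cntvct_el0" : "=r"(v) :: "memory");
+    return v;
+}
+
+/* Usage sketch: t0 = read_counter(); <operation under test>; t1 = read_counter(); */
+```
+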
+ +![Benchmark performance](/linaro-website/images/blog/Benchmark-performance) + +Again, for an in-depth discussion of these results we refer you to the published paper, but we provide two examples here: First, the low hypercall cost of Xen compared to KVM on Arm really only shows up in an isolated fashion in the hackbench results. The reason is that hackbench heavily utilizes the Linux scheduler, which results in a high amount of rescheduling virtual IPIs, and Xen Arm benefits from its low VM-to-hypervisor transition time for handling virtual IPIs. + +Second, consider the latency-sensitive network benchmark TCP\_RR. This benchmark sends a single byte back and forth between a client and the server running in the VM, and shows high overhead on all platforms. To understand where this overhead is spent, we used *tcpdump* to capture timestamps at various locations in the full software stack. These results showed us that the majority of the overhead on the incoming network path was spent between the physical machine running the VMs receiving a network packet and the VM seeing that packet, but only a relatively small part of this time was spent actually transitioning between the VM and the hypervisor. Instead, most time was spent in the networking layers of the host Linux OS for KVM, and in the Dom0 Linux OS for Xen. The same was true for the outgoing network path. The fundamental reason for Xen being slower than KVM in this case is Xen’s I/O model, which uses a special VM, Dom0, to handle physical network packets. Xen must perform expensive scheduling operations of the application VM and Dom0 and expensive mapping and unmapping operations to set up shared data mappings between the application VM and Dom0. + +These results are surprising given a typical focus on the performance of low-level hypervisor operations; instead, the hypervisor design and I/O model turn out to have a significant impact on real application performance. + +## Conclusions + +Arm hypervisors do not necessarily benefit from a fast transition cost between the VM and the hypervisor, because hypervisor software requires more complex interactions than simply switching between execution contexts to support common macro operations such as I/O. Surprisingly, KVM Arm actually exceeds the performance of Xen Arm for most real application workloads involving I/O. This is due to differences in hypervisor software design and implementation that play a larger role than how the hardware supports low-level hypervisor operations. + +The new improvements to the Arm architecture, the Virtualization Host Extensions (VHE), may allow Type 2 hypervisors to bring Arm’s fast VM-to-hypervisor transition cost to real application workloads involving I/O, given the combination of a simpler I/O model for Type 2 hypervisors and a VM-to-hypervisor transition cost that is potentially lower than on x86 systems. + +The published paper describes more performance numbers, offers more detailed explanations, and gives an in-depth overview of VHE and how Type 2 hypervisors benefit from these architectural changes. + +Christoffer Dall, Linaro Virtualization Tech Lead, will be presenting this work at ISCA 2016 in Seoul, Korea, on Monday 20 June at 4-5pm in session 4B: NoC / Virtualization. http://isca2016.eecs.umich.edu.
+ +**References:** + +(1): [http://www.cs.columbia.edu/\~cdall/pubs/isca2016-dall.pdf](http://www.cs.columbia.edu/~cdall/pubs/isca2016-dall.pdf) diff --git a/src/content/blogs/op-tee-open-source-security-mass-market.mdx b/src/content/blogs/op-tee-open-source-security-mass-market.mdx new file mode 100644 index 0000000..88438da --- /dev/null +++ b/src/content/blogs/op-tee-open-source-security-mass-market.mdx @@ -0,0 +1,47 @@ +--- +author: joakim-bech +date: 2014-09-03T17:06:05.000Z +comments: false +title: OP-TEE, open-source security for the mass-market +tags: + - android + - qemu +link: /blog/core-dump/op-tee-open-source-security-mass-market/ +description: In this article, Joakim Bech takes a detailed look at the history + of OP-TEE, the open-source security for the mass market. Read more on what + OP-TEE is here. +image: linaro-website/images/blog/Banner_Security +related: [] + +--- + +![thumb\_STMicroTransparent class=small-inline](/linaro-website/images/blog/thumb_STMicroTransparent) + +TEE. Behind this acronym hides the Trusted Execution Environment, a small OS-like environment that sits alongside a rich operating system – for instance Android. The purpose of the TEE is to keep all secret credentials and data manipulation in the small TEE rather than in a larger rich OS that is often the vulnerable target of malware and hackers in general. In order to reach this goal, application software is architected in a way such that sensitive functions are precisely defined and offloaded to the TEE in the form of Trusted Applications. + +The concept was formalized around 2007 by the [**OMTP standardization forum**](http://en.wikipedia.org/wiki/Open_Mobile_Terminal_Platform), which issued a set of [**security requirements**](https://www.gsma.com/newsroom/all-documents/omtp-documents/omtp-documents-1-1-omtp-advanced-trusted-environment-omtp-tr1-v1-1/) on functionality a TEE should support. The GlobalPlatform organization went a step further by defining standard APIs: on the one hand, the TEE internal APIs that a Trusted Application can rely on, and on the other hand, the communication interfaces that rich OS software can use to interact with its Trusted Applications. It is worth noting that, because the TEE threat model assumes that nothing coming from the rich OS is trustworthy, the designer of a TA (Trusted Application) must assume that the rich-OS-side client of the TA may not be legitimate. Beyond the APIs, GlobalPlatform also introduced a [**compliance-testing process**](https://globalplatform.org/) to guarantee functional interoperability, and issued a Protection Profile to allow certifying that a TEE meets [**the necessary security level**](https://globalplatform.org/). + +Back in 2009, ST began its work on TEE as part of the ST-Ericsson mobile joint-venture and decided to promote the TEE together with its customers, among which was Nokia, **who first introduced the concept in the mobile industry**. To this end, a TEE relying on the Arm TrustZone® technology to provide isolation from the rich OS was implemented. The TEE had to fit into a very constrained environment, and TrustZone was key to making it possible as it provides isolation in hardware. Another design choice was made to keep the TEE small and simple: rely on the rich OS to schedule the TEE. This amounts to seeing TAs as extensions of rich OS threads. In other words, the TEE does nothing other than what the rich OS asks of it.
+ +Already at that time, the long-term goal was to make the implementation available industry-wide, in order to defragment security implementations in mobile platforms. One key requirement to avoid fragmentation is the support of standards. In 2013, ST-Ericsson obtained **GlobalPlatform’s compliance qualification** with this implementation, proving that the APIs were behaving as expected in the GlobalPlatform specifications. + +At the same time, Linaro was investigating security and especially the leverage and promotion of Arm TrustZone. Linaro’s core mandate is to build and maintain the Linux baseport on Arm, integrating the features of Arm cores in Linux. It was therefore quite natural for Linaro to extend its focus by supporting an open-source TEE port on Arm TrustZone, and building security features on it. This provided a clear opportunity to defragment the security ecosystem on Arm-based chipsets. Linaro and ST therefore agreed to collaborate to open-source the TEE. + +It has been almost a year since STMicroelectronics, with full support from Ericsson, and Linaro joined forces to make the TEE, now called OP-TEE, available to the community. Ever since the Linaro Security Working Group was formed in September 2013, Linaro and ST engineers have worked together to revamp the code base, to make it portable, and to remove any legacy or ST-specific code. + +OP-TEE is now available on GitHub, at [https://github.com/OP-TEE](https://github.com/OP-TEE). It consists of three components in separate gits: the normal world user space client APIs (optee\_client), a Linux kernel TEE device driver (optee\_linuxdriver) and the Trusted OS (optee\_os). OP-TEE currently adheres to GlobalPlatform APIs, namely the GlobalPlatform TEE Client API 1.0 and GlobalPlatform TEE Internal API 1.0 specifications, available freely on the GlobalPlatform website. The Trusted OS part is under a BSD license, so that SoC vendors and device manufacturers may modify it without any obligation to disclose the modifications. This choice was key to making OP-TEE usable in commercial products, and thus to building an industry community around OP-TEE. The other major task was the abstraction of platform-specific parts in such a way that it should be fairly easy to port and incorporate OP-TEE in products from different vendors. + +![OP-TEE architecture with the scope of its three gits](/linaro-website/images/blog/op-tee_diagram) + +OP-TEE targets Arm cores and therefore includes secure monitor code for TrustZone – which is the code executed when the core switches between TrustZone and non-TrustZone modes. We expect that it should still be fairly easy to use OP-TEE on architectures other than Arm TrustZone – for instance on the Cortex-M and Cortex-R range of Arm cores, and therefore further defragment security in embedded electronics, in areas such as the Internet of Things or automotive. +By releasing OP-TEE to the public, ST and Linaro have provided a seed that will grow from contributions coming from the Arm ecosystem, and especially from Linaro members. An open-source TEE supporting standard interfaces and bringing a community together will reduce fragmentation in the way Arm TrustZone is used, to everyone’s benefit. We also anticipate that it will foster private and public applied research in security, by giving access to the technology to universities, researchers and governments around the world.
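+
+To make the client side of this concrete, the sketch below shows a minimal rich OS application calling into a Trusted Application through the GlobalPlatform TEE Client API mentioned above (the API implemented by optee\_client). The TA UUID, command ID and parameter usage are hypothetical placeholders for illustration only, not part of OP-TEE itself:
+
+```c
+#include <stdint.h>
+#include <stdio.h>
+#include <tee_client_api.h>
+
+/* Hypothetical UUID of an example Trusted Application. */
+static const TEEC_UUID example_ta_uuid = {
+	0x12345678, 0x5b69, 0x11e4,
+	{ 0x9d, 0xbb, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b }
+};
+
+#define EXAMPLE_CMD_INCREMENT 0 /* hypothetical command ID understood by the TA */
+
+int main(void)
+{
+	TEEC_Context ctx;
+	TEEC_Session sess;
+	TEEC_Operation op = { 0 };
+	uint32_t err_origin;
+	TEEC_Result res;
+
+	/* Connect to the TEE through the normal world client library. */
+	res = TEEC_InitializeContext(NULL, &ctx);
+	if (res != TEEC_SUCCESS)
+		return 1;
+
+	/* Open a session towards the (hypothetical) Trusted Application. */
+	res = TEEC_OpenSession(&ctx, &sess, &example_ta_uuid,
+			       TEEC_LOGIN_PUBLIC, NULL, NULL, &err_origin);
+	if (res != TEEC_SUCCESS)
+		goto out;
+
+	/* Pass one value in, get one value back from the TA. */
+	op.paramTypes = TEEC_PARAM_TYPES(TEEC_VALUE_INOUT, TEEC_NONE,
+					 TEEC_NONE, TEEC_NONE);
+	op.params[0].value.a = 41;
+
+	res = TEEC_InvokeCommand(&sess, EXAMPLE_CMD_INCREMENT, &op, &err_origin);
+	if (res == TEEC_SUCCESS)
+		printf("TA returned %u\n", op.params[0].value.a);
+
+	TEEC_CloseSession(&sess);
+out:
+	TEEC_FinalizeContext(&ctx);
+	return res == TEEC_SUCCESS ? 0 : 1;
+}
+```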
+ +*** + +**Hervé Sibert**  / System Security Architect, Director / STMicroelectronics + +Hervé is Security architect, Director, at STMicroelectronics. After 3 years as an engineer and researcher in cryptography and network security at France Telecom, he joined the Mobile and Personal Division of NXP in 2006, which was merged into ST-Ericsson. He is now driving integration of TEE in the architecture of ST products. He works closely with Linaro and is also active in standards organizations such as the Trusted Computing Group (TCG) and GlobalPlatform, where he coordinates the TEE Security Working Group. + +**Joakim Bech** / Security Working Group, Tech Lead / Linaro + +Joakim has been a Linux user for about 15 years and for the 8 years prior to joining Linaro he was working in the telecom industry for companies such as Sony Ericsson, EMP and ST-Ericsson. Roles there included architect, team leader and development engineer. Most of his time has been spent in embedded security where he was a major contributor to the GlobalPlatform certified TEE / TrustZone solution created by ST-Ericsson. diff --git a/src/content/blogs/opencsd-operation-use-library.mdx b/src/content/blogs/opencsd-operation-use-library.mdx new file mode 100644 index 0000000..1948914 --- /dev/null +++ b/src/content/blogs/opencsd-operation-use-library.mdx @@ -0,0 +1,368 @@ +--- +excerpt: This article will describe the programming and operation of the OpenCSD + library in decoding CoreSight™ trace. Starting with a brief review of + CoreSight technology and terminology, these elements will be related to the + configuration of the library in order to successfully decode a captured trace + stream. +title: OpenCSD – Operation and Use of the Library +description: In this article, Mike Leach takes a detailed look at operation and + use of the Library (OpenCSD). Read about his findings here! +image: linaro-website/images/blog/30921180788_34ce2cd5f8_c +author: mike-leach +date: 2016-07-29T21:57:01.000Z +tags: + - linux-kernel + - open-source +link: /blog/core-dump/opencsd-operation-use-library/ +related: [] + +--- + +![lightbox\_disabled=True Core Dump Banner url=https://wiki-archive.linaro.org/CoreDevelopment](/linaro-website/images/blog/core-dump) + +This article will describe the programming and operation of the OpenCSD library in decoding CoreSight™ trace. Starting with a brief review of CoreSight technology and terminology, these elements will be related to the configuration of the library in order to successfully decode a captured trace stream. + +A brief exploration of the key programming APIs will be provided, along with a description of the test and example programs and the test data that drives these. + +## **Introduction to the OpenCSD Library.** + +The OpenCSD library is designed to allow a client application to decode trace streams generated through CoreSight trace hardware. The library may be built natively on an Arm target, or on a host PC running Linux or Windows. The main library and API is written in C++, with a C API wrapper library provided for situations where this is preferred, and to ease the integration with scripting languages. The library can be used for trace captured on target, or by an off target capture device. + +The client application will configure the library according to the hardware settings of the CoreSight components which generated the trace. 
The client will also provide access to the memory images of the programs or regions traced in order for the trace decoding to correctly follow the traced instruction sequence. + +Once the library is correctly configured, the client application can then push the captured binary trace data through the library, which will result in a series of output packets describing the state of the core executing the instructions, such as the exception level, instruction architecture and security state, the instruction ranges executed on the core, and core events such as exceptions and interrupts. + +The client application must then interpret these output packets to produce the desired information – for example, disassembling the instructions in the ranges indicated to provide the user with a view of the program execution. + +## **A Review of CoreSight™ terms and components.** + +A typical CoreSight system is shown in Figure 1. + +The components can be classified as follows:- + +1. **Trace sources:** These are the ETMs (or PTM) attached to the Cortex cores which trace the execution of the program running on that core, or software trace sources such as the STM, which can be directly written to by software running on a core to provide “printf” like messaging. + +2. **Trace sinks:** These can be the on-chip buffers, such as the ETB which contains a small amount of dedicated RAM to save the trace, or the ETR, which co-operates with the system software to save trace data to system allocated memory. Additionally there may be a TPIU component (not shown), which can connect the trace to an off-chip capture device. + The trace sink will format the incoming trace data into a CoreSight frame format, which associates trace source data with the Trace ID. + +3. **Trace Infrastructure:** The funnels, CTIs (not shown) and replicators (not shown) which direct and multiplex sources to sinks, and which can be used to control the events that control the capture of trace. + +![Picture1](/linaro-website/images/blog/Picture1-core-dump) + +Figure 1: Typical CoreSight System. + +The system software, or a program using the trace system, must program up the CoreSight components to generate trace as required. Each trace source is programmed with a CoreSight Trace ID, to allow the source to be identified when de-multiplexing the buffer and decoding the Trace. + +![image 1](/linaro-website/images/blog/image-1-core-dump) + +## **The Decode Process.** + +The task of decoding the incoming trace stream is a three stage process. + +1. **De-multiplex** by Trace ID. The library provides a de-multiplexing component that will split the incoming frame formatted stream into individual Trace ID streams. + +2. **Packet Processing:** This converts the incoming Trace ID stream into a discrete set of trace packets according to the protocol defined by the trace source (ETMv3, ETMv4, PTM, STM). + +3. **Packet Decoding:** This interprets an incoming set of trace packets, removing protocol specific elements to produce the **generic output packets** that describe the operation of the trace source – for example the instructions executed by the core. + +***Note:*** *Where a system has a single trace source that does not use a formatter, or trace data has been previously de-multiplexed into individual trace streams, then the library can be programmed without the de-multiplexing stage.* + +## **Configuring the Library – Concepts.** + +This section discusses the basic concepts involved in configuring the library for trace decode.
API specifics are presented in the next section. + +The library provides a decoder management component called a “**decode tree**”. This provides an API to create a connected set of decode components. **Figure 2** shows a configured decode tree inside a client application. + +![figure 2](/linaro-website/images/blog/figure-2-core-dump) + +Figure 2: Configured Decode Tree. + +A decode tree is set up for a given trace sink, and the trace sources tracing into that sink. The arrangement of the decode tree therefore mirrors that of the CoreSight hardware in the system. The client program must have knowledge of the CoreSight hardware and configuration in order to correctly set up the trace decoder. + +Configuration consists of a number of stages. First create a decode tree, which will automatically create a ***CoreSight*** ***frame de-multiplexor*** stage to interpret the CoreSight formatted trace frames and split these into individual Trace ID streams. This de-mux stage has output points for each possible Trace ID. + +Next, decoder elements need to be created for each trace source we are interested in decoding - this could be all the trace sources that are generating trace, or a subset of them. Decoders are created using the create decoder API function on the decode tree, which requires that appropriate ***decoder configuration information*** is supplied – including the Trace ID. Creating a decoder automatically connects it to the appropriate Trace ID data stream. + +The decoder elements also require access to program memory to correctly follow the path of the executed instructions. The decode tree provides a ***memory accessor*** interface to allow the client program to supply these memory areas. + +Finally, the client will implement a call-back interface to collect the ***generic trace element*** output, which will be attached to the decode tree. + +The library is now ready for use and the client can begin pushing the trace data into the library using the ***data path*** API. + +### **Decoder Configuration Information.** + +Trace sources allow a number of different configuration options. These vary depending on the protocol type and version but can generally be classified into two groups:- + +1. Options that control the type and format of the trace packets generated. These are options that determine if cycle accurate trace is generated, if timestamp packets are generated or if certain protocol optimisations are used, and of course the programmed Trace ID. These trace options, in the form of the programmed register values, are required by the decoder in order to correctly decode the trace. + +2. Options that control the amount of trace generated, or the start and stop of trace generation. These are trace address filtering ranges, start and stop address enables and trace trigger events. The registers that may be programmed to control these features are not required for decode. + +The decoder API contains a structure for the required trace configuration registers that each protocol needs. When creating a decoder for a given trace source the client program will fill in this structure and pass it to the decode tree API. As the configuration contains the Trace ID, this decoder will be attached to the appropriate output on the de-multiplexor. + +### **Program Memory Accessor.** + +The packet decode elements that are responsible for following the program trace require access to the memory image of the traced program to analyse the opcodes executed.
The memory accessor interface allows the client to provide direct memory access, access to saved memory buffers or file access to the program images. + +## **The Decode Tree Configuration API.** + +This section covers specific API functions and data types. Further API documentation is available in the source code, formatted for extraction by the ‘doxygen’ tool to create a reference manual. + +![figure 3](/linaro-website/images/blog/figure-3-core-dump) + +Figure 3 introduces some of the components and interface types used when connecting components within the decode tree. These connections are created automatically as the API is used to create the decoder objects within the tree. The diagram shows the path of the data through the decoder to the client application. + +***`ITrcDataIn`***: Input interface to both the frame de-mux and the packet processor. This accepts raw byte data. + +***`IPktDataIn`***: Input interface to the packet decoder. Takes the protocol specific output packets from the packet processor and decodes the trace. + +***`IInstrDecode`***: Interface to the opcode analyser. Used by the packet decoder to determine the instruction types – branch, non-branch and possible branch target addresses when following the instruction execution path. + +***`ITargetMemAccess`***: Provides an interface to memory images of the code executed in the trace run. Used by the packet decoder to follow the instruction execution path. These images may be in the form of files or memory buffers. + +***`ITrcGenElemIn`***: This interface is provided by the client application – it accepts the generic trace packets from the decoder and analyses them according to its need. Packets from all the decoders in the tree are passed to a single interface instance in the client; the source of the packets is identified by Trace ID. + +***`IPktRawDataMon`***: This interface is optionally provided by the client application if it wants to monitor the protocol specific packets generated by the packet processor, when the main output from the packet processor is directed to the packet decoder. The test program described below uses this interface to print such packets. + +### **Configuration using the C++ API** + +Configuration using the C++ API begins with the creation of a decode tree. + +![Redo Box 1](/linaro-website/images/blog/redo-box-1-core-dump) + +The flag \[*OCSD\_TRC\_SRC\_FRAME\_FORMATTED*] tells the creation function to automatically set up the de-multiplexor for the CoreSight trace formatted frame. The second parameter tells the de-multiplexor that there are no frame syncs in the incoming raw trace stream (frame syncs are used when trace is output via a TPIU). This is by far the most common trace format when analysing trace captured on target. The library has a built-in Arm instruction set opcode analyser which will be created and automatically attached to the decoders. + +Next, the individual decoders are created. The creation of a decoder requires that the decoder configuration information is provided – this is in the form of the ocsd\_xyz\_cfg structures and classes. The client application must fill in the structure / class and pass this to the decoder creation API on the decoder tree. + +![Box 3](/linaro-website/images/blog/box-3-core-dump) + +Decoders are selected by name – those built into the library have defined names in the library headers \[e.g. *OCSD\_BUILTIN\_DCD\_PTM*]. The API allows for the creation of a packet processor only (used for debugging trace hardware), or more usually a full packet processor / packet decoder pair \[*OCSD\_CREATE\_FLG\_FULL\_DECODER*]. + +![Redo Box 2](/linaro-website/images/blog/redo-box-2-core-dump) + +Having created all the required decoders, the next stage is to add the memory images to the memory access handler interface. Memory images can take a number of forms: + +* Simple memory dumps from the target system in the form of contiguous binary files. + +* A memory buffer provided by the client application. + +* Program executable files or library .so files. The client must know the correct region within the file that contains the opcodes and the load address for the file. + +* A call-back function to access memory can be provided. This can be used to allow a client to provide custom memory image access, or if the decode is running on the target system then program memory can be accessed directly. + +Each of these memory image scenarios has an associated memory accessor class, which contains the start address of the image in the target memory map and the method to read the image (which may be a file or a buffer). + +Single or multiple memory accessors can be used, which are handled by the memory accessor mapper. This selects the correct memory accessor according to the address needed by the packet decoder. This relationship is shown in Figure 4. + +API calls to create the memory accessors and add them to the memory accessor mapper are provided on the Decode Tree. + +![Figure 4](/linaro-website/images/blog/figure-4-core-dump) + +Figure 4: Memory Access Handler + +Figure 4 shows a typical example – a trace session has been run tracing a program ‘my\_prog’ which in turn loads ‘my\_lib.so’. The client adds these as memory images using a file memory accessor object to the decode tree in order to correctly process the trace data.
The example code below shows how this is achieved: + +![Redo Box 3](/linaro-website/images/blog/redo-box-3-core-dump) + +A memory accessor mapper is created in the decode tree. This needs to occur only once per decode tree. The file images are then added by populating the *ocsd\_file\_mem\_region\_t* structure. An array of these structures is passed to the memory accessor creation function which adds the accessor to the mapper. + +The *OCSD\_MEM\_SPACE\_ANY* parameter tags this memory image as existing for any memory space. The memory image can be tagged as existing in secure memory space, the non-secure memory space, or as in this case both. When decoding the trace, the decoder will use the memory space according to the trace data. The example is created with the ‘any’ tag – indicating it is valid for any memory space that the trace covers. The memory mapper will handle selecting the correct memory image according to address range and memory space when a memory request arrives from the decoder. The narrowest memory space will take priority over a more general one – overlapping memory ranges are only allowed if the memory spaces are different. + +Finally, the client must also provide the interface that will receive the generic output packets from the decoders. + +![Redo Box 4](/linaro-website/images/blog/redo-box-4-core-dump) + +The decode tree is now ready to process trace data. + +### **Configuration using the C API** + +Configuration using the C API follows the same pattern as with the C++ API. Many of the C API functions serve as wrappers for the C++ API equivalents. + +Create a decode tree – will return a handle for the tree or 0 on failure. + +![Box 7 ](/linaro-website/images/blog/box-7-core-dump) + +Create decoders, using the handle supplied in the create tree operation:- + +![Box 5 ](/linaro-website/images/blog/redo-box-5-core-dump) + +It should be noted here that the creation function returns the Trace ID that the decoder is associated with. This is extracted from the configuration data and may be used in other C API calls for operations related to this specific decoder. + +Add the memory images for the trace decoding. One key difference here is that the mapper is created automatically on the first “add image” call on the decode tree. + +![Box 6](/linaro-website/images/blog/redo-box-6-core-dump) + +The output interface is provided by registering a call-back function. + +![Box 7](/linaro-website/images/blog/redo-box-7-core-dump) + +## **The Trace Data Path and API** + +The decode tree provides a single input interface for the raw trace data (***ITrcDataIn***), and a single output interface for the decoded trace in the form of generic trace packets (***ITrcGenElemIn***). + +The input interface defines a series of data path operations that are used to control the processing of the trace data on the configured decoder. The interface function on ***ITrcDataIn*** is:- + +```c +ocsd_datapath_resp_t TraceDataIn( const ocsd_datapath_op_t op, +                                  const ocsd_trc_index_t index, +                                  const uint32_t dataBlockSize, +                                  const uint8_t * pDataBlock, +                                  uint32_t *numBytesProcessed) +``` + +The *ocsd\_datapath\_op\_t* *op* parameter defines the operation for the current call. The data path response type returned (*ocsd\_datapath\_resp\_t*) will inform the next operation required. A single byte of data input to the decoder can result in multiple output packets, so a response mechanism provides the ability for downstream processing to tell the input operation to WAIT.
+ +The *index* parameter defines the byte index within the captured trace data being processed that is the start of the data block passed into the decoder – as defined by the *pDataBlock* and *dataBlockSize* parameters. This allows large trace data files to be read in smaller blocks, and also allows for the fact that all of the supplied block may not be processed if a WAIT response is seen. The *numBytesProcessed* parameter returns the actual amount of the incoming block that is processed. + +The equivalent C API function contains the same set of parameters, plus a decode tree handle: + +```c +ocsd_datapath_resp_t ocsd_dt_process_data(const dcd_tree_handle_t handle, +                                          const ocsd_datapath_op_t op, +                                          const ocsd_trc_index_t index, +                                          const uint32_t dataBlockSize, +                                          const uint8_t *pDataBlock, +                                          uint32_t *numBytesProcessed) +``` + +The data path operations are shown in the table below:- + +![data ops table 1](/linaro-website/images/blog/data-ops-table-1) + +The data path response types are shown below:- + +![data resp table 2](/linaro-website/images/blog/data-resp-table-2) + +If a fatal error occurs then the client may look at the last logged error to determine the cause (the decode tree API provides a getLastError function). Depending on the nature of the error the decoder may be able to be re-used by sending the *OCSD\_OP\_RESET* operation through the input interface. + +The operation parameter is propagated through to the packet decoder stage, but not to the output interface. + +The interface function on the generic packet output (***ITrcGenElemIn***) interface is:- + +```c +ocsd_datapath_resp_t TraceElemIn(const ocsd_trc_index_t index_sop, +                                 const uint8_t trc_chan_id, +                                 const OcsdTraceElement & elem) +``` + +The *index\_sop* parameter is the byte index within the captured trace buffer for the trace protocol packet that generated the output packet. As a single protocol packet can generate a number of output packets this may be the same for a number of packets. + +The *trc\_chan\_id* is the Trace ID of the trace source that generated the packet, and *elem* is the output packet itself. + +This interface returns a data path response type, allowing the client analyser to cause the processing to WAIT, if for example it is buffering packets and needs to process the current batch before continuing, or to signal an error using a *FATAL* code. + +The client is responsible for ensuring that the correct operations are used, including the WAIT / FLUSH requirements. The test programs give an example of correctly driving the decode tree. + +## **The Library Test Programs and Test Data.** + +The test programs are used for testing the library components and APIs, and are also source code examples of how to use the library. + +### **Test Data** + +The library source ships with some test data in the ***\snapshots*** directory. This data is trace captured from target systems and saved in a “snapshot” format – an Arm DS-5 open standard1 format that provides sufficient information to decode the captured trace. The snapshot consists of a set of ***.ini*** files and binary data that provide: + +* Configuration of the ETM/PTM registers. +* Core architecture and type. +* Binary files with captured trace data from the trace buffers in the system. +* Binary files with memory image dumps from the trace run. +* The connections between cores / ETMs & PTMs and trace buffers.
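+
+Before moving on to the test programs, the fragment below pulls the C API configuration and data path calls together into one rough end-to-end sketch. Apart from *ocsd\_dt\_process\_data* (shown above), the function and constant names are taken from the OpenCSD C API header (*opencsd\_c\_api.h*) and may differ between library versions, so treat them as assumptions; the ETMv4 configuration value, memory image file, load address and buffer handling are purely illustrative placeholders:
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <opencsd/c_api/opencsd_c_api.h>
+
+/* Client callback receiving the generic trace element output (the C API
+ * equivalent of ITrcGenElemIn). */
+static ocsd_datapath_resp_t gen_elem_cb(const void *p_context,
+                                        const ocsd_trc_index_t index_sop,
+                                        const uint8_t trc_chan_id,
+                                        const ocsd_generic_trace_elem *elem)
+{
+	(void)p_context;
+	printf("Idx:%lu; ID:%u; elem type %d\n",
+	       (unsigned long)index_sop, trc_chan_id, (int)elem->elem_type);
+	return OCSD_RESP_CONT; /* carry on decoding */
+}
+
+/* Error checking largely omitted for brevity. */
+int decode_buffer(const uint8_t *trace, uint32_t trace_len)
+{
+	dcd_tree_handle_t tree;
+	ocsd_etmv4_cfg cfg;
+	unsigned char csid;
+	uint32_t used = 0;
+	ocsd_trc_index_t index = 0;
+
+	/* Decode tree for CoreSight frame formatted data without frame syncs. */
+	tree = ocsd_create_dcd_tree(OCSD_TRC_SRC_FRAME_FORMATTED,
+	                            OCSD_DFRMTR_FRAME_MEM_ALIGN);
+	if (!tree)
+		return -1; /* creation returns 0 on failure */
+
+	/* Decoder configuration: in real code, fill this with the register
+	 * values programmed on the target; only the Trace ID is set here. */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.reg_traceidr = 0x10;
+	ocsd_dt_create_decoder(tree, OCSD_BUILTIN_DCD_ETMV4I,
+	                       OCSD_CREATE_FLG_FULL_DECODER, &cfg, &csid);
+
+	/* Memory image of the traced program - hypothetical dump file and load address. */
+	ocsd_dt_add_binfile_mem_acc(tree, 0xFFFFFFC000080000UL,
+	                            OCSD_MEM_SPACE_ANY, "./kernel_dump.bin");
+
+	/* Attach the generic trace element callback. */
+	ocsd_dt_set_gen_elem_outfn(tree, gen_elem_cb, NULL);
+
+	/* Push the captured trace through the data path, honouring WAIT. */
+	while (trace_len) {
+		ocsd_datapath_resp_t resp =
+			ocsd_dt_process_data(tree, OCSD_OP_DATA, index,
+			                     trace_len, trace, &used);
+		trace += used;
+		index += used;
+		trace_len -= used;
+
+		if (OCSD_DATA_RESP_IS_WAIT(resp))
+			/* Flush buffered output before sending more data. */
+			resp = ocsd_dt_process_data(tree, OCSD_OP_FLUSH, index,
+			                            0, NULL, &used);
+		if (OCSD_DATA_RESP_IS_FATAL(resp))
+			break; /* inspect the last logged error for the cause */
+	}
+	ocsd_dt_process_data(tree, OCSD_OP_EOT, index, 0, NULL, &used);
+
+	ocsd_destroy_dcd_tree(tree);
+	return 0;
+}
+```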
+ +### **trc\_pkt\_lister: The C++ library test program.** + +This program is used to test the core C++ library functionality by decoding the snapshot formatted test data. + +The program will decode all the trace sources in a given buffer for which it can find a valid decoder. The default operation is to print out the protocol packets from the packet processor stage only, but options can be provided to run the packets through the packet decoder stage and print out the generic packets, or do both operations. The data is printed to screen and/or a text file, and options are available to filter the output by Trace ID. + +The program uses the *snapshot\_parser\_lib* library, which contains the code to interpret the snapshots and build a decode tree using this information. The *CreateDcdTreeFromSnapShot* class performs this task. The *CreateDcdTreeFromSnapShot::createDecodeTree* function performs many of the configuration tasks described above. + +The main body of the test program (*trc\_pkt\_lister.cpp*) processes the command line options then calls the snapshot parser to read the snapshot accordingly and then create a decode tree. Once the decode tree has been created, the output interfaces (packet protocol elements or generic elements) are connected to functions to print out the packets generated by the program. + +All the packet type classes in the decode library contain a function that will produce a string representation of the packet. This way it is easy to visualise packets and debug trace or trace decoders. + +Once all elements are configured, the test program will push the trace data through the library to generate the output files. + +An example command line, running the test program on the Juno R1 snapshot and filtering for a single trace ID, is as follows:- + +***trc\_pkt\_lister -ss\_dir ../../../snapshots/juno\_r1\_1 -decode -id 0x10*** + +The *-decode* option forces a full decode; the *-id* option is a Trace ID filter. Full information about the available options can be found using the program’s *-help* option. + +This will produce an output file – defaulting to *trc\_pkt\_lister.ppl*. A small portion of the output is shown below, with both the packet processor ETMv4 specific packets (highlighted in blue) and the generic trace decoder output packets (highlighted in red). + +```c +Idx:1643; ID:10; [0x00 0xf7 0x95 0xa2 0xa5 0xdb ]; I_NOT_SYNC : I Stream not synchronised +Idx:1650; ID:10; [0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x80 ]; I_ASYNC : Alignment Synchronisation. +Idx:1662; ID:10; [0x01 0x01 0x00 ]; I_TRACE_INFO : Trace Info. +Idx:1666; ID:10; [0x9d 0x00 0x35 0x09 0x00 0xc0 0xff 0xff 0xff ]; I_ADDR_L_64IS0 : Address, Long, 64 bit, IS0.; Addr=0xFFFFFFC000096A00; +Idx:1675; ID:10; [0x04 ]; I_TRACE_ON : Trace On.
+Idx:1676; ID:10; [0x85 0x00 0x35 0x09 0x00 0xc0 0xff 0xff 0xff 0xf1 0x00 0x00 0x00 0x00 0x00 ]; I_ADDR_CTXT_L_64IS0 : Address & Context, Long, 64 bit, IS0.; Addr=0xFFFFFFC000096A00; Ctxt: AArch64,EL1, NS; CID=0x00000000; VMID=0x0000; +Idx:1692; ID:10; [0xf7 ]; I_ATOM_F1 : Atom format 1.; E + +Idx:1675; ID:10; OCSD_GEN_TRC_ELEM_TRACE_ON( [begin or filter]) +Idx:1676; ID:10; OCSD_GEN_TRC_ELEM_PE_CONTEXT((ISA=Unk) EL1N; 64-bit; VMID=0x0; CTXTID=0x0; ) + +Idx:1692; ID:10; OCSD_GEN_TRC_ELEM_INSTR_RANGE(exec range=0xffffffc000096a00:[0xffffffc000096a10] (ISA=A64) E ISB ) + +Idx:1693; ID:10; [0x9d 0x30 0x25 0x59 0x00 0xc0 0xff 0xff 0xff ]; I_ADDR_L_64IS0 : Address, Long, 64 bit, IS0.; Addr=0xFFFFFFC000594AC0; +Idx:1703; ID:10; [0xf7 ]; I_ATOM_F1 : Atom format 1.; E + +Idx:1703; ID:10; OCSD_GEN_TRC_ELEM_ADDR_NACC( 0xffffffc000594ac0 ) + +``` + +This part of the output shows the point where the decoder finds a synchronisation point in the trace stream. We can see the ETMv4 ASYNC, TRACE\_INFO, address and context packets which set the core state and start address for the trace session. This will cause the output of the OCSD\_GEN\_TRC\_ELEM\_TRACE\_ON() and OCSD\_GEN\_TRC\_ELEM\_PE\_CONTEXT() packets. + +The next packet (ATOM\_F1) indicates a waypoint in the waypoint trace (*for information on waypoint trace see the ETMv4 documentation*). The decoder will then follow the memory image from the start address until it finds the first instruction that is a waypoint. This will generate the OCSD\_GEN\_TRC\_ELEM\_INSTR\_RANGE() packet indicating execution of all instructions from the start address (0xffffffc000096a00), up to but not including the range end address (0xffffffc000096a10). + +This next output portion shows that a single byte packet can result in multiple generic output packets and many lines of instruction execution decoded: + +```c +Idx:1737; ID:10; [0xfd ]; I_ATOM_F3 : Atom format 3.; ENE + +Idx:1737; ID:10; OCSD_GEN_TRC_ELEM_INSTR_RANGE(exec range=0xffffffc000083280:[0xffffffc000083284] (ISA=A64) E BR ) +Idx:1737; ID:10; OCSD_GEN_TRC_ELEM_INSTR_RANGE(exec range=0xffffffc000083d40:[0xffffffc000083d9c] (ISA=A64) N BR ) +Idx:1737; ID:10; OCSD_GEN_TRC_ELEM_INSTR_RANGE(exec range=0xffffffc000083d9c:[0xffffffc000083dac] (ISA=A64) E iBR b+link ) + +``` + +The single ATOM packet represents 3 waypoint instructions. This results in 3 traced ranges, with 1, 23 and 4 instructions executed. + +### **c\_api\_pkt\_print\_test: The C API test program** + +This test program checks the correct function and implementation of the C API wrapper library and interfaces. + +It is more limited in functionality than the C++ test program – it will decode only a single trace ID source, and has the paths to the snapshot directories hard coded, selecting an appropriate test snapshot according to the protocol decoder under test. + +By default the program will decode ETMv4, trace ID 0x10 – selecting the Juno r1 snapshot. + +Alternate protocols can be tested using command line options – with additional options used to test the different memory accessor API calls. + +The output is a text file (*c\_api\_test.log*), similar to that created by the C++ test program. + +## **Conclusions** + +This article has described the operation of the OpenCSD library and some of the main API functions that may be used for configuration of the library and the decoding of CoreSight trace data. + +The “decode tree” is the key abstraction provided to configure and use the library in order to decode data from a trace sink.
+ +The library source code has inline documentation that may be extracted using the ‘doxygen’ tool for more detailed information on the API functions covered and the additional APIs available. + +Information on the CoreSight components and trace protocols mentioned here is available from the Arm website. A document describing the DS-5 “snapshot” format is due to be published shortly. + +This article has covered configuring the existing trace decoders built into the library for the currently supported set of Arm trace protocols. A future article in this series on OpenCSD will cover adding additional decoders into the library to cope with new or custom trace protocols or for other specialised decode. These custom decoders can either be compiled in as part of the C++ library, using the existing base infrastructure, or as an external binary implementation using an external registration API in the C API interface. + +*** + +1. To be published soon diff --git a/src/content/blogs/qualcomm-innovation-center-becomes-core-member-of-linaro.mdx b/src/content/blogs/qualcomm-innovation-center-becomes-core-member-of-linaro.mdx new file mode 100644 index 0000000..fef4e35 --- /dev/null +++ b/src/content/blogs/qualcomm-innovation-center-becomes-core-member-of-linaro.mdx @@ -0,0 +1,32 @@ +--- +excerpt: Qualcomm Innovation Center becomes Core Member of Linaro +author: linaro +description: Qualcomm Innovation Center becomes Core Member of Linaro +date: 2015-07-27T14:42:17.000Z +comments: false +title: Qualcomm Innovation Center becomes Core Member of Linaro +tags: + - linux-kernel +link: /news/qualcomm-innovation-center-becomes-core-member-of-linaro/ +image: linaro-website/images/blog/Code_Image_Core_tech +related: [] + +--- + +Cambridge, UK; 27 July 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that Qualcomm Innovation Center, Inc. (QuIC), a subsidiary of Qualcomm Incorporated, has become a Core Member of Linaro. + +QuIC joined Linaro as a Club Member in February 2014 and actively participates in the Linaro Enterprise Group (LEG), the Linaro Mobile Group (LMG) and the Linaro Community Boards Group (LCG). QuIC joins Arm Ltd and HiSilicon, the SoC subsidiary of Huawei, as the third Core Member of Linaro. Core Members provide significant resources to Linaro, and participate across Linaro’s activities ranging from core open source software to specific projects from the mobile and embedded sectors to networking and enterprise products. + +QuIC has brought a history of innovation in wireless technology into Linaro, as well as adds its extensive open source experience to the 200+ open source software engineers already working in Linaro helping to accelerate the delivery of open source software across the industry. + +“We are delighted that QuIC is further developing its relationship with Linaro as a Core member”, said George Grey, Linaro CEO. “QuIC has a long history of supporting the open source community and has become a very influential member of Linaro through its commitment to open source, technical excellence and experience.” + +Jason Bremner, senior vice president product management for Qualcomm Technologies, Inc. said, “QuIC has been committed to working with Linaro and other open source communities to accelerate the development of technologies on the Arm platform. 
After working for the past year with Linaro, we saw the advantages of becoming a Core Member and improving our ability to both further collaborate and benefit from the work that Linaro is doing.” + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company is a collaborative engineering organization with over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro’s engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/rapidly-growing-chinese-mobile-phone-maker-tinno-mobile-joins-linaro-mobile-group.mdx b/src/content/blogs/rapidly-growing-chinese-mobile-phone-maker-tinno-mobile-joins-linaro-mobile-group.mdx new file mode 100644 index 0000000..86c5965 --- /dev/null +++ b/src/content/blogs/rapidly-growing-chinese-mobile-phone-maker-tinno-mobile-joins-linaro-mobile-group.mdx @@ -0,0 +1,35 @@ +--- +excerpt: Tinno Mobile Joins Linaro Mobile Group +title: Tinno Mobile Joins Linaro Mobile Group +description: Linaro announces that Shenzhen Tinno Mobile Technology Company Ltd + has joined the Linaro Mobile Group (LMG). Read more here. +image: linaro-website/images/blog/consumer-bg +author: linaro +date: 2015-10-28T13:05:39.000Z +tags: + - linux-kernel + - open-source +link: /news/rapidly-growing-chinese-mobile-phone-maker-tinno-mobile-joins-linaro-mobile-group/ +related: [] + +--- + +Cambridge, UK; 28 October 2015 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, today announced that Shenzhen Tinno Mobile Technology Company Ltd has joined the Linaro Mobile Group (LMG). + +“Tinno Mobile is often referred to as the ‘hidden strong player’ behind leading French mobile phone brand Wiko, but the company is much more than that”, said Jill Guo, EVP, Linaro Greater China Region. “We’re very pleased to welcome Tinno to LMG and look forward to working with them and the other LMG members to develop shared mobile platform software for the latest Arm architecture on which they can further build their technological leadership.” + +LMG was formed in July 2014 to consolidate and optimize open source software for Arm powered mobile phones, tablets, laptops and wearables. The Group's engineers work on the Android Open Source Project (AOSP), Performance and Power optimizations, Graphics and GPGPU, and work closely with other groups in Linaro Core Engineering on other open source technologies. Tinno Mobile brings additional representation of the needs of today's mobile phone manufacturers to LMG and this will help the group and its silicon and software vendor members to work on projects that will accelerate the market. 
+ +“Tinno Mobile is proud of its technological leadership in areas such as dual-SIM mobile phones”, said Wang Bin, CTO of Tinno Mobile. “We’re very pleased to join Linaro to work with other leading mobile players on core platform software that will enable us to fully leverage and contribute to the long-term development of open source software.” + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 250 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/). + +**About Tinno Mobile** +Shenzhen Tinno Mobile Technology Company Ltd is one of the most famous mobile phone manufacturers in China. Established in 2002, the company’s headquarters are located in Shenzhen, with two factories in DongGuan specializing in designing and manufacturing mobile phones. Tinno Mobile has complete in-house capabilities including strong R\&D, manufacturing and worldwide sales. The company is the manufacturer of the first double GSM double online mobile phone in the world, with its own dual SIM mobile phone core technology patent and extensive experience in this area. + +Every month, over a quarter of a million Tinno Mobile products (mainly dual GSM Sim mobile phone) are exported to overseas markets including Germany, France, Turkey, Ukraine, Brasil and some Asian countries like India, Thailand and the Philippines. The company’s export quantity has been rising sharply. It has not yet entered into most of the South American markets yet, so there are strong potential growth opportunities being developed. [www.tinno.com](http://www.tinno.com/) diff --git a/src/content/blogs/running-64bit-android-l-qemu.mdx b/src/content/blogs/running-64bit-android-l-qemu.mdx new file mode 100644 index 0000000..e067e9b --- /dev/null +++ b/src/content/blogs/running-64bit-android-l-qemu.mdx @@ -0,0 +1,98 @@ +--- +author: alex-bennee +date: 2014-08-21T07:00:04.000Z +comments: false +title: Running Android L Developer Preview on 64-bit Arm QEMU +tags: + - android + - qemu +description: > + In this article, Alex Bennee takes a look at running Android L Developer + preview on a 64-bit Arm QEMU. Read about his findings here! +link: /blog/core-dump/running-64bit-android-l-qemu/ +image: linaro-website/images/blog/30921188158_953bca1c9f_k +related: [] + +--- + +## Running Android L Developer Preview on 64-bit Arm QEMU + +![QEMU](/linaro-website/images/blog/quem) + +Did you know the Android emulator is based on QEMU? + +When the Android SDK was first made available to the world, Google used QEMU as the basis for their Android emulator. 
They copied the source code to a custom repository in the Android Open Source Project (AOSP) and made a number of invasive modifications to QEMU. Specifically, they added emulation of a specific board called goldfish for the purposes of emulating an Android phone. + +Every build of Android targets a specific hardware platform, and the emulated goldfish platform is no different. A number of specific emulator features are enabled in both the Android kernel and Android userspace environment when run in an emulated environment. These features allow a smooth and complete user experience resembling using a real Android device, on laptop and desktop workstations. + +The Android emulator provides Android application developers with a convenient development environment and allows developers to develop and test applications on devices which they do not have physical access to. With the introduction of the Armv8-A architecture and Android support for 64-bit Arm platforms, this need is more important than ever because it allows developers to begin adapting their applications to an Arm 64-bit based mobile ecosystem prior to hardware being available. + +## Differences to mainline QEMU + +There are a number of differences to mainline QEMU that fall broadly into the two categories: User Interface and Emulated Devices. + +The UI patches support skinning of the framebuffer window so a mock up of the emulated devices’ keypad/buttons can be displayed alongside the screen. The other big user interface component is accelerated graphics support. Accelerated graphics is a key component in modern mobile systems and a crucial for a reasonable application performance experience. The Android emulator provides accelerated graphics support in the emulated guest Android operating system, by providing an OpenGL passthrough mechanism that allows the emulated Android system to make OpenGL calls that are directly rendered by the host accelerated graphics stack to the emulated window without having to emulate a GPU inside QEMU itself. Emulating GPU hardware is certainly not something you want (or could!) emulate directly, because GPUs are complicated proprietary pieces of hardware with strict performance requirements. + +The emulated devices include a fast IPC mechanism known as the “qemu\_pipe” that provides a way for the emulated Android userspace to communicate with the host machine. This is used by both the adb service to communicate with the guest adb daemon and console services and to provide a fast passthrough path for the emulated accelerated graphics support described above. The Android emulator also emulates GSM support, a GPS chip, sensors (proximity and rotation), and more to provide application developers with an experience close to a real phone and to allow developers to test all aspects of their applications, such as how an application responds to a user rotating a device or an application requesting the current location. + +## Along comes Android L + +![Android-L](/linaro-website/images/blog/Android-L) + +Google recently announced Android L at Google I/O. One of the major new features in Android L is the support for the Armv8-A 64-bit architecture. Given the growth in performance and memory capacity of mobile devices, 64-bit support is a crucial feature for embracing the future. We now carry in our pockets what in olden days would have been described as nothing less than a supercomputer. 
+ +As you may have noticed, thanks to Linaro, the latest version of upstream QEMU (2.1) now includes full Armv8 system emulation support. This means that users can use upstream QEMU to run a full 64-bit Armv8-A kernel and filesystem, such as a 64-bit Ubuntu cloud image. This was no small endeavour as it involved emulating a completely new instruction set, exception model, CPU implementation, and more. The implementation was verified with a custom instruction verification tool ([RISU](https://git.linaro.org/people/peter.maydell/risu.git)) and was heavily reviewed upstream by an engaged and incredibly supportive upstream QEMU community. + +Reimplementing 64-bit Armv8-A support in the old Android emulator fork of QEMU would be a herculean effort and attempts to backport the changes from upstream QEMU to the Android emulator were not successful. Consequently, there was a sudden and strong desire within Google and from Linaro members to instead forward port the Android emulator specific features to upstream QEMU and have such an implementation ready for the Android L introduction at Google I/O. At the time, this was roughly three weeks away. + +## Enter Linaro + +Linaro assembled a small team who between us had experience in QEMU, the Linux kernel, and the Android ecosystem. While Google had started some of the forward porting work for 32-bit Android support, it was taking longer than they liked as they weren’t familiar with the current state of the upstream QEMU code base and were busy preparing for Google I/O. + +We delivered an upstream based branch of QEMU with minimal changes that could run a stable emulated Android instance on 64-bit Arm. We also provided a branch of the official Android 3.10 based Linux kernel with backported 64-bit Armv8 support based on a minimal set of necessary topic branches used for Linaro’s Stable Kernel (LSK). We spent a few extra days fixing some of the issues that were found when stress testing the “qemu pipe” IPC mechanism. After Google I/O we also did some performance analysis on the emulation to identify some performance tweaks to the main emulator that are in the process of being upstreamed now. +The virtualization team in Linaro is working with Google to also provide Android support based on upstream 32-bit Arm QEMU as well as providing a number of missing features from the current prototype, such as display rotation, and a number of Android emulator console commands. We expect to be able to provide QEMU branches with this support in Q4 2014. + +## I want to try + +As with all our code, we at Linaro do our work in public with a presumption of upstream first. If you want to play with the results of our work, you are more than welcome to. The details will be the subject of a follow-up post/wiki entry but I’ll sketch out the basic steps here that assume familiarity with building the kernel, Android and QEMU. The major **CAVEAT** at the moment is that we haven’t implemented OpenGL passthrough (as there is no current suitable solution that targets upstream) so as a result all the graphics are rendered in software on the emulated device. Anything needing a proper OpenGL implementation (e.g. the browser) won’t start for this reason. We expect both an OpenGL implementation and skinning support based on Linaro’s upstream work will be made available in due course.
+ +### You will need: + +A custom arm64 build of the [ranchu kernel](https://git.linaro.org/people/) + +``` +ARCH=arm64 make ranchu_defconfig + +ARCH=arm64 make CROSS_COMPILE=aarch64-linux-gnu- +``` + +A patched version of the AOSP tree (master or l-preview branch), with the qemu\_pipe tweak (http://people.linaro.org/\~alex.bennee/android/android-init-tweaks.diff) + +```bash + tar -xvf linaro-devices.tar.gz + + source build/envsetup.sh + + lunch ranchu-userdebug +``` + +A copy of [our QEMU branch](https://git.linaro.org/people/peter.maydell/qemu-arm.git/refs/heads) + +```bash + ./configure --target-list=aarch64-softmmu + + make +``` + +Some spare time (there is a lot to compile) + +**Finally you can launch the emulator with a command line like this:** + +```bash + ../qemu.git/aarch64-softmmu/qemu-system-aarch64 -cpu cortex-a57 -machine type=ranchu -m 4096 \ + -kernel ./ranchu-kernel/Image -append 'console=ttyAMA0,38400 keep_bootcon' -monitor stdio \ + -initrd ranchu-build/ramdisk.img \ + -drive index=2,id=userdata,file=ranchu-build/userdata.img -device virtio-blk-device,drive=userdata \ + -device virtio-blk-device,drive=cache -drive index=1,id=cache,file=ranchu-build/cache.img \ + -device virtio-blk-device,drive=system -drive index=0,id=system,file=ranchu-build/system.img \ + -netdev user,id=mynet -device virtio-net-device,netdev=mynet -show-cursor +``` + +I have symlinks in my test directory to try and keep things sane. So ranchu-kernel links to arch/arm64/boot in my kernel tree and ranchu-build links to out/target/product/ranchu in my android tree. Please note the order of the block devices on the command line is important.
Sandia and Linaro will work together with the other members of the HPC SIG to jointly address hardware and software challenges, expand the HPC ecosystem by developing and proving new technologies and increase technology and vendor choices for future platforms. + +“Determining the viability of the Arm ecosystem for supporting of our mission applications requires a significant, concentrated effort,” said Kevin Pedretti, Principal Member of Technical staff at Sandia National Laboratories and systems software lead for Astra. “By joining the Linaro HPC SIG, we plan to collaborate on Astra’s software stack components, ensuring our HPC applications work efficiently while benefitting the wider HPC community.  We expect that Linaro’s expertise in Arm software will help enable us to accelerate the demonstration of Arm as a viable option for running our large-scale production HPC applications and enable us to work more effectively with the wider Arm community.” + +Sandia supports the US National Nuclear Security Administration (NNSA) to ensure specialized components are developed, tested and produced, and that the United States’ nuclear weapons are quality assured. The NNSA runs the Vanguard Program, the purpose of which is to evaluate different high-performance computing technologies to determine whether they can help support the NNSA’s mission to maintain safety, security and effectiveness of the US nuclear stockpile. As a result, Sandia has made significant investment over the past seven years as part of its Advanced Architecture testbed program to help grow the ecosystem for Arm processors in HPC. + +Linaro and its members created the HPC Special Interest Group (SIG) in September 2016 to drive the adoption of Arm in high-performance computing through standardisation, interoperability, orchestration and use case development. Linaro provides a forum where SoCs, system vendors, integrators, users, distros, hyperscalers can co-develop the foundational software to make choice easier for the desired application space. The HPC SIG is currently working to leverage Arm hardware around server class infrastructure, multi-gigabit interconnect support, scalable vector extensions and software ecosystem support to build exascale HPC deployments. The focus is on three segments: hardware deployment, software ecosystem and optimised libraries. + +“We’re very pleased to welcome Sandia to the HPC SIG and we look forward to working with them and the other industry leading HPC SIG members to enable the most effective deployment and management of Arm-based HPC solutions,” said Kanta Vekaria, Director of the Linaro HPC SIG. “Sandia’s Vanguard Astra will be the world’s largest Arm-based supercomputer used by the National Nuclear Security Administration (NNSA) to run advanced modelling and simulation workloads for mission critical applications in areas including national security, energy and science. This sets an impressive vision that we look forward to helping Sandia achieve.” + +**About Sandia National Laboratories**\ +Sandia National Laboratories is a multimission laboratory operated by National Technology and Engineering Solutions of Sandia LLC, a wholly owned subsidiary of Honeywell International Inc., for the U.S. Department of Energy’s National Nuclear Security Administration. Sandia Labs has major research and development responsibilities in nuclear deterrence, global security, defense, energy technologies and economic competitiveness, with main facilities in Albuquerque, New Mexico, and Livermore, California. 
+ +**About Linaro**\ +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10.\ +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96boards.org/) . diff --git a/src/content/blogs/softbank-joins-96boards-steering-committee.mdx b/src/content/blogs/softbank-joins-96boards-steering-committee.mdx new file mode 100644 index 0000000..196c07a --- /dev/null +++ b/src/content/blogs/softbank-joins-96boards-steering-committee.mdx @@ -0,0 +1,38 @@ +--- +title: SoftBank joins Linaro 96Boards Steering Committee +author: linaro +date: 2018-07-18T09:00:00.000Z +description: Linaro Ltd, the open source collaborative engineering organization + developing software for the Arm® ecosystem, announced today that Japanese + telecommunications giant SoftBank Corp. has joined the 96Boards initiative as + a Steering Committee member. +image: linaro-website/images/blog/softbank-joins-96boards +tags: + - open-source + - arm +published: true +related: [] + +--- + +\[Cambridge, UK; 18 July 2018] Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, announced today that Japanese telecommunications giant SoftBank Corp. has joined the 96Boards initiative as a Steering Committee member. SoftBank and Linaro began cooperating in Japan in September 2017 to enable the interconnection between SoftBank IoT Platform and the Consumer Edition (CE) and Internet of Things Edition (IE) 96Boards products. The 96Boards Steering Committee provides a neutral forum in which SoftBank can cooperate closely with other 96Boards partners and promote its online services and 96Boards related products to developers. + +96Boards is Linaro’s initiative to build a single worldwide software and hardware community across low-cost development boards based on Arm technology. A large range of products compliant with the 96Boards specifications are already available worldwide and this range is supplemented with additional hardware functionality provided through standardized mezzanine boards. With open source software and support for cloud services already available, SoftBank’s increased involvement in 96Boards will extend the range of services available to developers using 96Boards products and boost the presence of 96Boards products in Japan. 
+ +“96Boards have been widely adopted by developers around the world” said Hironobu Tamba, Vice President, Smart IoT Division, “by joining the 96Boards Steering Committee, we expect to improve interconnection between SoftBank IoT platform and 96Boards, so that developers can easily verify their IoT system design by using 96Boards, and confirmed and analyzed their data by SoftBank IoT platform. By promoting collaboration with Linaro in the future, SoftBank will solve various problems in the IoT era, such as standardization of specifications in global standards, and will offer a selectable and optimal IoT platform.” + +The 96Boards steering committee now includes more than twenty companies who are working together on Consumer, Enterprise, TV Platform, Networking, IoT and SOM specifications. To date, the Consumer, Enterprise and TV Platform and IoT specifications have been released with boards available for each. In addition there are a large number of mezzanine products and other accessories available for a range of applications from industrial control and robotics, through AI, HPC and IoT, on to data centers and edge applications. + +“We are excited to welcome SoftBank to the 96Boards Steering Committee and look forward to  the benefits they will bring to the 96Boards range of specifications and products built around the specifications” said Yang Zhang, Director of 96Boards. “SoftBank is uniquely positioned to help connect the vast range of vendors from across the ecosystem and foster accelerated collaboration on standardized platforms for the future.” + +![SoftBank joins 96Boards Steering Committee](/linaro-website/images/blog/softbank-joins-96boards) + +### About SoftBank + +SoftBank Corp., a subsidiary of SoftBank Group Corp. (TOKYO:9984), provides mobile communication, fixed-line communication and Internet connection services to customers in Japan. Leveraging synergies with other companies in the SoftBank Group, SoftBank Corp. aims to transform lifestyles through ICT and expand into other business areas including IoT, robotics and energy. To learn more, please visit [www.softbank.jp/en/corp/group/sbm/](http://www.softbank.jp/en/corp/group/sbm/). + +### About Linaro + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96boards.org/). 
diff --git a/src/content/blogs/software-leaders-advise-linaro.mdx b/src/content/blogs/software-leaders-advise-linaro.mdx new file mode 100644 index 0000000..5aff391 --- /dev/null +++ b/src/content/blogs/software-leaders-advise-linaro.mdx @@ -0,0 +1,32 @@ +--- +author: linaro +date: 2010-12-13T12:08:44.000Z +description: CAMBRIDGE, UK - 13 DEC 2010 +link: /news/software-leaders-advise-linaro/ +title: Software Leaders to Advise Linaro +tags: [] +related: [] + +--- + +CAMBRIDGE, UK - 13 DEC 2010 + +## Canonical, GENIVI, HP, LiMo and MontaVista become Advisors to Linaro + +Following completion of its first major release in November, Linaro announces the expansion of its ecosystem to include Advisory Partners Canonical, GENIVI, HP, LiMo Foundation and MontaVista Software all of whom are involved in building complex Linux based software. The Advisors will help to guide the Linaro Technical Steering Committee (TSC) on critical industry needs, facilitating the alignment of requirements. + +Linaro exists to accelerate innovation among software developers working on the most advanced semiconductor System-on-Chip (SoC) designs. The current wave of "always-connected, always-on" devices is increasingly turning to Linux and highly integrated SoCs to achieve the performance and battery life consumers demand. + +The availability of a stable, optimized software base and tools make it easier and quicker to develop high performance consumer devices. Manufacturers will benefit from an improved product development cycle, spending less time on low level, nondifferentiating software and more time on product innovation and delivering exceptional user experiences. + +"Having HP, Canonical, GENIVI, LiMo Foundation and MontaVista Software as advisors to our Technical Steering Committee will help us to make the best decisions on resource deployment, for the benefit of both our members and the open source community." said George Grey, CEO, Linaro. "We will continue to build extensive relationships with the electronics industry and open source communities to ensure our engineering is responsive to industry needs and is widely deployed." + +"We believe that Linaro is a critical strategic partner for accelerating the Arm software and silicon ecosystem," said Steve Manser, senior vice president, Product Development, Palm Global Business Unit, Personal Systems Group, HP. "Through collaboration and convergence on common Linux foundational components and tools, HP looks to accelerate our innovative webOS platform with the latest Arm-based SoC designs. webOS will play a prominent role in HP's future, and we see Linaro and the Arm ecosystem as key allies for building that future." + +"Shortening development time, getting products to market quickly and reducing development costs for embedded Linux based products are key goals for both Linaro and GENIVI," said Graham Smethurst, President of GENIVI. "As an advisor to Linaro we can provide aligned automotive industry input for IVI solutions, helping Linaro shape the engineering efforts applied to its optimized foundation of open source tools and software." + +"LiMo's vision is to deliver a vibrant ecosystem of commercial products and servicesthat are enabled through an independent and competitive Linux based mobile software platform developed in close collaboration with key stakeholders in the mobile industry," said Morgan Gillis, Executive Director of LiMo Foundation. 
"We are delighted to join Linaro's TSC as an Advisor Partner and look forward to collaborating with Linaro in shaping up the next generation Linux ecosystem that will deliver a rich and connected user experience to consumers across a diverse range of devices." + +"As a long time supporter of embedded Linux for Arm processors, MontaVista is delighted to be collaborating with other industry leaders as a Linaro Advisor", said Dan Cauchy, vice president of marketing and business development, MontaVista Software. "Our goal is to help drive rapid innovation of Linux components and tools within Linaro, commercialize the results, and make them available as part of MontaVista Linux for the benefit of our customers, device manufacturers and the Arm ecosystem." + +"As an Advisor Partner we look forward to shaping the next generation of Ubuntu devices with the Linaro ecosystem. Canonical's customers will be able to engage with us to focus on the user experience in the knowledge that the solution is based on shared and standard components" says Jane Silber, CEO at Canonical, "We look forward to working with Linaro to rapidly accelerate the time-to-market for devices, ensuring that users and partners can quickly take advantage of hardware improvements." diff --git a/src/content/blogs/stm-and-its-usage.mdx b/src/content/blogs/stm-and-its-usage.mdx new file mode 100644 index 0000000..a41a98d --- /dev/null +++ b/src/content/blogs/stm-and-its-usage.mdx @@ -0,0 +1,484 @@ +--- +excerpt: Read about System Trace Module (STM) which can not only collect trace + data from software sources, but also monitor hardware events. Learn how to + write traces to STM and how many approaches to do this, etc. +title: System Trace Module (STM) and its usage +description: In this article, Chunyan Zhang takes a detailed look at System + Trace Module (STM) and its usage. Read about his findings here! +image: linaro-website/images/blog/Banner_Linux_Kernel +author: chunyan-zhang +date: 2016-09-06T03:24:15.000Z +tags: + - linux-kernel + - open-source +link: /blog/core-dump/stm-and-its-usage/ +related: [] + +--- + +![lightbox\_disabled=True Core Dump Banner url=https://wiki-archive.linaro.org/CoreDevelopment](/linaro-website/images/blog/core-dump) + +## Introduction + +System Trace Module (STM) is a kind of trace source device, which can not only collect trace data from software sources, but also monitor hardware events. + +Any software program no matter where it is in kernel space or user space can write STM device with message string (i.e. trace data), like using print functions.  This article will mainly focus on software traces of STM - how to write traces to STM and how many approaches to do this, etc. + +Each software or hardware trace source is assigned a unique pair of master and channel, so that a decoder can know which source the trace data come from by this.  As a kind of resource, the number of masters and channels are limited, for example there are 128 masters, each supporting 65,536 channels on Arm CoreSight STM, while Intel STH has up to 65,536 masters and up to 256 channels per master. + +Unlike some traditional tracing approach which would lose all traces once system crashed since the traces are stored in system memory, tracing with STM can survive this kind of case because all traces collected via STM would end up in sink device which can be still alive even the system is dead so long as the hardware design allows it.There’s another benefit of using STM to collect software traces or monitor hardware events.  
Since everything is logged to the same STM with timestamps, it is possible to correlate events happening in the entire system rather than being confined to the logging facility of a single entity. + +## Terminology + + + + + + + + + + + + + + + + +
+STM device + +The STM hardware entity +
+stm_source device + + +A virtual device node from the view of sysfs, it is the interface + +for collecting software traces via STM device + +
+
+## Scope
+
+In this article we will start with two examples of using STM to collect software trace data, and then introduce more about how to do software tracing with STM: how to configure the device tree for STM, how the STM master/channel management policy works, an introduction to the stm\_source device, how to map STM to user space, how to decode STM trace data, and what is special about masters on CoreSight STM.
+
+## The typical use cases of STM
+
+Let’s start with the kernel space, and then we will see how to use STM from user space.
+
+### In kernel space
+
+There is an stm\_console \[1] which makes STM an output of the Linux console. Boot a kernel compiled with CONFIG\_STM\_SOURCE\_CONSOLE and you will see a console node under the stm\_source directory on the target:
+
+*# ls /sys/class/stm\_source/console/*
+
+*power   stm\_source\_link   subsystem   uevent*
+
+Once linked with an STM device, the stm\_console becomes one of the outputs of the kernel console logs, which will end up in the storage device connected to the STM device. For example, in the Arm CoreSight architecture the operation would look like:
+
+```
+# echo 1 > /sys/bus/coresight/devices/10003000.etf/enable_sink
+# echo 10006000.stm > sys/class/stm_source/console/stm_source_link
+[   29.135746] coresight-tmc 10003000.etf: TMC enabled
+[   29.140754] coresight-funnel 10001000.funnel: FUNNEL inport 4 enabled
+[   29.147391] coresight-stm 10006000.stm: STM tracing enabled
+```
+
+Note: before linking the stm\_source with the STM device, the sink device, which stores the trace data, has to be enabled.
+
+You will then see the Write Pointer Register of the sink device growing as messages are logged to the console, in this case:
+
+```
+# cat sys/bus/coresight/devices/10003000.etf/mgmt/rwp
+0x2258
+```
+
+To get the trace out and decoded, please refer to the section **Decoding traces with OpenCSD library** below.
+
+### In user space
+
+There are two ways to collect traces with an STM device in user space. One is mmapping the STM device into user space for zero-copy writing; for more details on this use case, see the section **Mapping STM to user-space** below.
+
+The other is writing to the STM device directly; the user space code will look like:
+
+```
+uint32_t trace_data[4] = {0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999};
+int fd = open("/dev/10006000.stm", O_RDWR | O_SYNC);
+write(fd, trace_data, sizeof(trace_data));
+close(fd);
+```
+
+The whole code of this example program can be found \[5].
+
+## **Device Tree configuration of STM**
+
+The code below shows a typical STM device tree configuration:
+
+```
+stm@10006000 {
+              compatible = "arm,coresight-stm", "arm,primecell";
+              reg = <0 0x10006000 0 0x1000>,
+                         <0 0x01000000 0 0x1800000>;
+              reg-names = "stm-base", "stm-stimulus-base";
+              clocks = <&clk26mhz>;
+              clock-names = "apb_pclk";
+              port {
+                          stm_out: endpoint {
+                                      remote-endpoint = <&funnel_in_port4>;
+                          };
+               };
+};
+```
+
+Readers may have noticed that the STM has two groups of reg addresses here. The first group describes the area of STM registers; the second group, whose ‘reg-name’ is ‘stm-stimulus-base’, describes the physical base address and the length of the stimulus ports area. Stimulus ports are also known as STM channels.
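+
+To make the relationship between the stimulus ports area and individual channels more concrete, here is a rough, illustrative calculation that is not from the original article; it assumes the 256-byte-per-channel port spacing mentioned later in this article and uses the stimulus base address from the device tree example above:
+
+```bash
+# Illustrative only: each stimulus port (channel) is assumed to occupy 256
+# bytes inside the "stm-stimulus-base" region shown in the device tree above.
+stimulus_base=0x01000000
+channel=16
+printf 'channel %d starts at 0x%x\n' "$channel" $(( stimulus_base + channel * 256 ))
+```
+
+In practice software does not normally compute these addresses by hand; the STM framework allocates masters and channels according to the policy described in the following sections, and this arithmetic is only meant to show how the second `reg` entry is carved up.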
+
+In the following examples in this article, I will use this STM; the whole code can be found here \[2].
+
+## STM source device
+
+To collect software traces with STM from kernel space, an stm\_source device is a necessary component. The 'stm\_source' can be connected to and disconnected from an STM device at runtime via the sysfs interface, and writing to the 'stm\_source' actually ends up in the STM device connected with it (and finally in the sink device connected with the STM device). All registered stm\_source devices can be found under ‘/sys/class/stm\_source/’.
+
+An important element of stm\_source is 'stm\_source\_data', which includes two necessary properties that must be initialized before registering an stm\_source device:
+
+* stm\_source name - it is just the file node name in sysfs.
+* channel numbers - how many channels this stm\_source requests to be allocated when it is linked with an STM device; the STM framework driver will look up and allocate this quantity of available channels for the stm\_source according to the STM master/channel management policy.
+
+## STM master/channel management policy
+
+The stm\_source class has a policy for allocating and managing masters and channels. Let’s go through the details in the three parts below:
+
+### 1. Allocate a range of channels from one master for stm\_source
+
+![figure 1 stm blog](/linaro-website/images/blog/figure-1-stm-blog)
+
+As Figure-1 shows, when linking an stm\_source with an STM device, the program will poll all masters starting from either
+
+1\). the start master configured in the file named "masters" under the policy rule directory, if there is a policy rule under */config/stp-policy/* with the same name as the stm\_source class device,
+
+Or
+
+2\). struct stm\_data::sw\_start, which is configured when the STM device driver initializes,
+
+and then check whether there are free channels on the first available master; the number of free contiguous channels on this master must be larger than or equal to the quantity of required channels. The first eligible master and the range of channels will be allocated to this stm\_source class device as its stimulus output ports.
+
+### 2. Create policy rules on target
+
+Mount the configfs at run time with:
+
+```
+mount -t configfs none /config
+```
+
+The directory 'stp-policy/' will then appear under '/config/'.
+
+Create a policy rule for a given STM device like below:
+
+```
+mkdir /config/stp-policy/10006000.stm.xyz
+```
+
+Here ‘10006000.stm’ is an example of the STM device name this policy applies to; it must match an STM device name that can be found under the /dev directory. ‘xyz’ is an arbitrary string without a dot. It is required, and it is separated from the device name by a dot, which is why no dot is allowed in the arbitrary string itself. Neither ‘*10006000.stm.xyz.abc*’ nor ‘*10006000.stm*’ is a valid name for a policy rule.
+
+Create policy rules for a given stm\_source class device like below:
+
+```
+mkdir /config/stp-policy/10006000.stm.my_policy/ftrace
+```
+
+Here ‘ftrace’ is a registered device of the stm\_source class. It can be used to write trace data into the STM device, and the data finally ends up in the sink device once linked. Note that the rule's name must be the same as the name of the stm\_source class device, since we are creating a policy for that given stm\_source.
+
+After creating the policy rule, there will be two files, 'masters' and 'channels', under the directory of this rule, for example:
+
+```
+# cat /config/stp-policy/10006000.stm.my_policy/ftrace/masters
+0 127
+# cat /config/stp-policy/10006000.stm.my_policy/ftrace/channels
+0 65535
+```
+
+These values give the range of masters/channels which can be used by the stm\_source device whose name matches the rule's name (i.e. ftrace in this case); the default values come from the configuration \[3] of the STM device (i.e. 10006000.stm in this case).
+
+The masters/channels files are configurable, and the rule is applied to the stm\_source class device when it is linked with any STM device.
+
+### 3. Allocate master/channels for applications
+
+1. Set the policy rule via the ioctl() interface of the STM device. A rule includes the allocated master, the first assigned channel, and the number of required channels. More details are introduced in the following section, **Mapping STM to user-space**.
+2. If an application program doesn't set a policy rule for itself, then when the application writes data to STM via the STM device's write() system call, a rule named “default” will be applied; if the “default” policy cannot be found either, then, as described above, the initial configuration of the STM device will be applied.
+
+## Mapping STM to user-space
+
+Mapping the STM stimulus ports area into user space is another way of using STM. The patch \[4] that added mmap support for CoreSight STM should be in kernel 4.9. A sample program can be found here \[6]; it was tested on Spreadtrum’s SC9836 \[2] and Arm’s Juno platform. It maps a page of channels (i.e. 16) to user space and writes a set of given specific data to the mmap’ed area.
+
+In this sample program the size of the mapped memory must be a multiple of the page size, so user programs have to map many channels at one time; that is, if each channel takes 256 bytes and the hardware page size is 4096 bytes, the user has to map at least 16 channels at a time. To be sure which channels will be mmap’ed, the program has to set a channel allocation policy for the STM device before doing the mapping.
+
+## CoreSight STM specificity on masters
+
+Unlike Intel STH (Software Trace Hub), masters on CoreSight STM are not under software control but have a hardwired association with processors: every processor connected to the CoreSight STM in the system has two master IDs, one for secure and one for non-secure accesses. When decoding CoreSight STM trace data, we can therefore easily tell from the master ID which processor a trace came from. Table-1 shows part of the masters allocation on Juno as an example.
+
+| Processors | Master ID for secure accesses | Master ID for non-secure accesses |
+| ---------- | ----------------------------- | --------------------------------- |
+| Cortex-A57 core 0 | 0 | 64 |
+| Cortex-A57 core 1 | 1 | 65 |
+| Cortex-A53 core 0 | 4 | 68 |
+| Cortex-A53 core 1 | 5 | 69 |
+| Cortex-A53 core 2 | 6 | 70 |
+| Cortex-A53 core 3 | 7 | 71 |
+| Mali-T624 GPU | 32 | 96 |
+| Cortex-M3 SCP | 33 | 97 |
+| Expansion master 0 | 34 | 98 |
+| Default master | 62 | 126 |
+| DAP-AXI-AP | 63 | 127 |
+
+*Table-1 CoreSight STM masters allocation on Juno*
+
+## Decoding traces with OpenCSD library
+
+Once the traces have been exported via STM, we can simply dump them from the sink device connected to the STM in the CoreSight system with the ‘dd’ command. For example, on the platform \[2] we use the ETF as the sink device, and the command would look like:
+
+```
+# dd if=/dev/10003000.etf of=cstraceitm.bin
+[  308.645119] coresight-tmc 10003000.etf: TMC read start
+[  308.651724] coresight-tmc 10003000.etf: TMC read end
+64+0 records in
+64+0 records out
+32768 bytes (32.0KB) copied, 0.012942 seconds, 2.4MB/s
+# ls -la cstraceitm.bin
+-rw-r--r-- 1 0  0   32768 Jan  1 00:05 cstraceitm.bin
+```
+
+The output filename should be ‘cstraceitm.bin’, which matches the configuration of the OpenCSD decoding library. It stores the raw trace data, which can then be decoded by the library.
+
+Download the OpenCSD source code from \[7] and build it according to the “Off Target OpenCSD Compilation” directions in HOWTO.md; you will then see:
+
+```
+opencsd/decoder$ ls lib/linux64/dbg/
+libcstraced.a  libcstraced_c_api.a  libcstraced_c_api.so  libcstraced.so
+opencsd/decoder$ ls tests/bin/linux64/dbg/
+libcstraced_c_api.so  libcstraced.so  simple_pkt_print_c_api  trc_pkt_lister
+```
+
+The ‘trc\_pkt\_lister’ program decodes STM traces. Copy the trace binary obtained with the ‘dd’ command above (i.e. ‘cstraceitm.bin’) to the decoding test snapshots directory, and then we can decode the traces:
+
+```
+opencsd/decoder$ cp cstraceitm.bin tests/snapshots/stm_only/
+opencsd/decoder$ export LD_LIBRARY_PATH=./lib/linux64/dbg/
+opencsd/decoder$ ./tests/bin/linux64/dbg/trc_pkt_lister -ss_dir tests/snapshots/stm_only/ -src_name ETB_1 | vim -
+```
+
+The decoded trace data will look like below:
+
+```
+Trace Packet Lister: CS Decode library testing
+-----------------------------------------------
+
+Library Version : 0.003
+
+Test Command Line:-
+./tests/bin/linux64/dbg/trc_pkt_lister   -ss_dir  tests/snapshots/stm_only/  -src_name  ETB_1
+
+Trace Packet Lister : reading snapshot from path tests/snapshots/stm_only/
+Using ETB_1 as trace source
+Trace Packet Lister : STM Protocol on Trace ID 0x1
+Idx:0; ID:20;   ASYNC:Alignment synchronisation packet.
+Idx:11; ID:20;  VERSION:Version packet.; Ver=3
+Idx:13; ID:20;  FREQ:Frequency packet.; Freq=0Hz
+Idx:21; ID:20;  M8:Set current master.; Master=0x41
+Idx:22; ID:20;  D32TS:32 bit data; with timestamp.; Data=0x55555555; TS=0x00000000E05CF92E ~[0xE05CF92E]
+Idx:37; ID:20;  D32:32 bit data.; Data=0xaaaaaaaa
+Idx:41; ID:20;  D32:32 bit data.; Data=0x66666666
+Idx:46; ID:20;  D32:32 bit data.; Data=0x99999999
+Idx:51; ID:20;  FLAG:Flag packet.
+ID:20   END OF TRACE DATA
+Trace Packet Lister : Trace buffer done, processed 32768 bytes.
+```
+
+The above trace data was decoded from a run of the sample program \[6]; the data values (0x55555555, 0xaaaaaaaa, 0x66666666 and 0x99999999) are exactly what the sample program wrote. From the decoded trace data we can see that these traces came from master 0x41 of the STM whose trace ID is 0x1. The data is timestamped, and the sequence ends with a flag packet.
+
+## Final words
+
+I hope this post provided a useful introduction to STM and presented clearly how to use it. One piece of STM-related work I have been doing is still ongoing: Ftrace integration with STM, which will allow function traces to be exported via STM once it is done. At this moment the 5th iteration \[8] for this feature has been released.
+ +\[1]\(https://elixir.bootlin.com/linux/v4.7/source/drivers/hwtracing/stm/console.c) + +\[2]\(https://elixir.bootlin.com/linux/v4.3/source/arch/arm64/boot/dts/sprd/sc9836.dtsi#L178) + +\[3]\(https://elixir.bootlin.com/linux/latest/source/drivers/hwtracing/coresight/coresight-stm.c) + +\[4]\(https://patchwork.kernel.org/patch/9189197/) + +\[5]\() branch stm-write-sample + +\[6]\() branch stm-mmap-sample + +\[7]\(https://github.com/Linaro/OpenCSD) + +\[8]\(https://lkml.org/lkml/2016/8/30/83) + +*** + +1. OpenCSD is an open source CoreSight Trace Decode library \[7], there are two articles introduced OpenCSD in Linaro Core Dump diff --git a/src/content/blogs/suspend-to-idle.mdx b/src/content/blogs/suspend-to-idle.mdx new file mode 100644 index 0000000..0de44a7 --- /dev/null +++ b/src/content/blogs/suspend-to-idle.mdx @@ -0,0 +1,143 @@ +--- +author: andy-gross +date: 2016-10-18T19:05:19.000Z +description: > + In this article, Andy Gross focuses on the implementation of suspend to idle, + which is a software implemented sleep state. Read about his findings here! +link: /blog/suspend-to-idle/ +title: Suspend to Idle +tags: + - linux-kernel +related: [] + +--- + +## Introduction + +The Linux kernel supports a variety of sleep states. These states provide power savings by placing the various parts of the system into low power modes. The four sleep states are suspend to idle, power-on standby (standby), suspend to ram, and suspend to disk. These are also referred to sometimes by their ACPI state: S0, S1, S3, and S4, respectively. Suspend to idle is purely software driven and involves keeping the CPUs in their deepest idle state as much as possible. Power-on standby involves placing devices in low power states and powering off all non-boot CPUs. Suspend to ram takes this further by powering off all CPUs and putting the memory into self-refresh. Lastly, suspend to disk gets the greatest power savings through powering off as much of the system as possible, including the memory. The contents of memory are written to disk, and on resume this is read back into memory. + +This blog post focuses on the implementation of suspend to idle. As described above, suspend to idle is a software implemented sleep state. The system goes through a normal platform suspend where it freezes the user space and puts peripherals into low-power states. However, instead of powering off and hotplugging out CPUs, the system is quiesced and forced into an idle cpu state. With peripherals in low power mode, no IRQs should occur, aside from wake related irqs. These wake irqs could be timers set to wake the system (RTC, generic timers, etc), or other sources like power buttons, USB, and other peripherals. + +During freeze, a special cpuidle function is called as processors enter idle. This enter\_freeze() function can be as simple as calling the cpuidle enter() function, or can be much more complex. The complexity of the function is dependent on the SoCs requirements and methods for placing the SoC into lower power modes. + +## Prerequisites + +### Platform suspend\_ops + +Typically, to support S2I, a system must implement a platform\_suspend\_ops and provide at least minimal suspend support. This meant filling in at least the valid() function in the platform\_suspend\_ops. If suspend-to-idle and suspend-to-ram was to be supported, the suspend\_valid\_only\_mem would be used for the valid function. + +Recently, however, automatic support for S2I was added to the kernel. 
Sudeep Holla proposed a change that would provide S2I support on systems without requiring the implementation of platform\_suspend\_ops. This patch set was accepted and will be part of the 4.9 release. The patch can be found at: [https://lkml.org/lkml/2016/8/19/474](https://lkml.org/lkml/2016/8/19/474)
+
+With suspend\_ops defined, the system will report the valid platform suspend states when /sys/power/state is read.
+
+```
+# cat /sys/power/state
+freeze mem
+```
+
+This example shows that both S0 (suspend to idle) and S3 (suspend to ram) are supported on this platform. With Sudeep’s change, only freeze will show up for platforms which do not implement platform\_suspend\_ops.
+
+### Wake IRQ support
+
+Once the system is placed into a sleep state, the system must receive wake events which will resume the system. These wake events are generated from devices on the system. It is important to make sure that device drivers utilize wake irqs and configure themselves to generate wake events upon receiving wake irqs. If wake devices are not identified properly, the system will take the interrupt and then go back to sleep and will not resume.
+
+Once devices implement proper wake API usage, they can be used to generate wake events. Make sure DT files also specify wake sources properly. An example of configuring a wakeup-source is the following (arch/arm/boot/dts/am335x-evm.dts):
+
+```
+ gpio_keys: volume_keys@0 {
+               compatible = "gpio-keys";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               autorepeat;
+
+               switch@9 {
+                       label = "volume-up";
+                       linux,code = <115>;
+                       gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+                       wakeup-source;
+               };
+
+               switch@10 {
+                       label = "volume-down";
+                       linux,code = <114>;
+                       gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+                       wakeup-source;
+               };
+       };
+
+```
+
+As you can see, two gpio keys are defined to be wakeup-sources. Either of these keys, when pressed, would generate a wake event during suspend.
+
+An alternative to DT configuration is for the device driver itself to configure wake support in code using the typical wakeup facilities.
+
+## Implementation
+
+### Freeze function
+
+Systems should define an enter\_freeze() function in their cpuidle driver if they want to take full advantage of suspend to idle. The enter\_freeze() function uses a slightly different function prototype than the enter() function. As such, you can't just specify the enter() function for both enter and enter\_freeze. At a minimum, enter\_freeze() will directly call the enter() function. If no enter\_freeze() is specified, the suspend will occur, but the extra things that would have happened if enter\_freeze() were present, like tick\_freeze() and stop\_critical\_timings(), will not occur. This results in timer IRQs waking up the system. This will not result in a resume, as the system will go back into suspend after handling the IRQ.
+
+During suspend, minimal interrupts should occur (ideally none).
+
+The picture below shows a plot of power usage vs time. The two spikes on the graph are the suspend and the resume. The small periodic spikes before and after the suspend are the system exiting idle to do bookkeeping operations, scheduling tasks, and handling timers.
It takes a certain period of time for the system to go back into the deeper idle state due to latency. + +**Power Usage Time Progression** + +The ftrace capture shown below displays the activity on the 4 CPUs before, during, and after the suspend/resume operation. As you can see, during the suspend, no IPIs or IRQs are handled. + +![Blog Picture 2](/linaro-website/images/blog/blog-picture-2) + +**Ftrace capture of Suspend/Resume** + +### Idle State Support + +You must determine which idle states support freeze. During freeze, the power code will determine the deepest idle state that supports freeze. This is done by iterating through the idle states and looking for which states have defined enter\_freeze(). The cpuidle driver or SoC specific suspend code must determine which idle states should implement freeze and it must configure them by specifying the freeze function for all applicable idle states for each cpu. + +As an example, the Qualcomm platform will set the enter\_freeze function during the suspend init function in the platform suspend code. This is done after the cpuidle driver is initialized so that all structures are defined and in place. + +### Driver support for Suspend/Resume + +You may encounter buggy drivers during your first successful suspend operation. Many drivers have not had robust testing of suspend/resume paths. You may even find that suspend may not have much to do because pm\_runtime has already done everything you would have done in the suspend. Because the user space is frozen, the devices should already be idled and pm\_runtime disabled. + +## Testing + +Testing for suspend to idle can be done either manually, or through using something that does an auto suspend (script/process/etc), auto sleep or through something like Android where if a wakelock is not held the system continuously tried to suspend. If done manually, the following will place the system in freeze: + +``` +/ # echo freeze > /sys/power/state +[ 142.580832] PM: Syncing filesystems ... done. +[  142.583977] Freezing user space processes ... (elapsed 0.001 seconds) done. +[  142.591164] Double checking all user space processes after OOM killer disable... (elapsed 0.000 seconds) +[  142.600444] Freezing remaining freezable tasks ... (elapsed 0.001 seconds) done. +[  142.608073] Suspending console(s) (use no_console_suspend to debug) +[  142.708787] mmc1: Reset 0x1 never completed. +[  142.710608] msm_otg 78d9000.phy: USB in low power mode +[  142.711379] PM: suspend of devices complete after 102.883 msecs +[  142.712162] PM: late suspend of devices complete after 0.773 msecs +[  142.712607] PM: noirq suspend of devices complete after 0.438 msecs +< system suspended > +…. +< wake irq triggered > +[  147.700522] PM: noirq resume of devices complete after 0.216 msecs +[  147.701004] PM: early resume of devices complete after 0.353 msecs +[  147.701636] msm_otg 78d9000.phy: USB exited from low power mode +[  147.704492] PM: resume of devices complete after 3.479 msecs +[  147.835599] Restarting tasks ... done. +/ # +``` + +In the above example, it should be noted that the MMC driver was responsible for 100ms of that 102.883ms. Some device drivers will still have work to do when suspending. This may be flushing of data out to disk or other tasks which take some time. + +If the system has freeze defined, it will try to suspend the system. 
If it does not have freeze capabilities, you will see the following: + +``` +/ # echo freeze > /sys/power/state +sh: write error: Invalid argument +/ # +``` + +## Future Developments + +There are two areas where work is currently being done on Suspend to Idle on Arm platforms. The first area was mentioned earlier in the platform\_suspend\_ops prerequisite section. The work to always allow for the freeze state was accepted and will be part of the 4.9 kernel. The other area that is being worked on is the freeze\_function support. + +The freeze\_function implementation is currently required if you want the best response/performance. However, since most SoCs will use the Arm cpuidle driver, it makes sense for the Arm cpuidle driver to implement its own generic freeze\_function. And in fact, Arm is working to add this generic support. A SoC vendor should only have to implement specialized freeze\_functions if they implement their own cpuidle driver or require additional provisioning before entering their deepest freezable idle state. diff --git a/src/content/blogs/sve-in-qemu-linux-user.mdx b/src/content/blogs/sve-in-qemu-linux-user.mdx new file mode 100644 index 0000000..ae3acfa --- /dev/null +++ b/src/content/blogs/sve-in-qemu-linux-user.mdx @@ -0,0 +1,228 @@ +--- +author: alex-bennee +published: true +title: ARM SVE Support in QEMU's Latest Linux-User Mode +description: In this article, Alex Bennée takes a detailed look at the ARM SVE + (Scalable Vector Extension) support in the latest version of QEMU. Read more + here! +date: 2018-07-17T09:00:00.000Z +image: linaro-website/images/blog/Banner_Virtualization +tags: + - arm + - qemu +related: [] + +--- + +Arm’s innovative [Scalable Vector Extension](https://community.arm.com/processors/b/blog/posts/technology-update-the-scalable-vector-extension-sve-for-the-armv8-a-architecture) instructions are a new set of instructions designed for data-heavy supercomputing applications. Superficially they are similar to the existing NEON/AdvSIMD instructions in that they allow you to exploit [data parallelism](https://en.wikipedia.org/wiki/Data_parallelism) in algorithms by executing several identical operations at the same time over multiple lanes of a vector register. Where SVE differs from traditional vector processing is that the number of lanes are implementation defined. The clever part comes from a programming model that means assumptions about the number of lanes are not hard coded into the binaries. This means the same binary that runs on a mobile SoC, with a small number of lanes, can automatically take advantage of the wider lanes on a HPC supercomputing cluster, without recompiling the program. + +The SVE instruction set also provides some additional features which are worth taking advantage of. These include a fault handling mechanism called the First Fault Register which allows you to defer expensive calculations to avoid crossing page boundaries and instead handle reaching non-accessible pages as just another boundary condition for your loop. Another feature of note is scatter/gather load/store support which allows complex structures to be quickly loaded into registers in fewer instructions. + +While SVE enabled hardware is on its way, it’s going to be a while before anyone outside of a silicon lab can get their hands on real hardware. In the meantime, software developers want to be able to port and test their software now so they can be ready for the arrival of real SVE enabled chips. 
With the release of QEMU 3.0, they will be able to do just that. Thanks to QEMU's linux-user emulation, we now have the ability to develop and test userspace Arm binaries utilising SVE. + +If you want to learn more about the history of vector processing and the implications it has for dynamic binary translation, watch the [talk I gave at KVM Forum last year](https://www.youtube.com/watch?v=IYHTwnde0g8). + +If you are ready to start experimenting with these instructions then read on. + +## Prerequisites + +These instructions currently assume you are running a Linux x86\_64 development environment. This will soon be available in the up-coming QEMU 3.0 release but currently you can use a fresh checkout of the [master repository](https://git.qemu.org/?p=qemu.git;a=summary). + +You will also need to have a working Docker setup as we are going to be hosting our development environment inside a docker container. Ideally it should be setup in a developer friendly way so the main user account can run docker without a password. The QEMU project [has some notes on potential docker setups](https://git.qemu.org/?p=qemu.git;a=blob;f=docs/devel/testing.rst;h=f33e5a84234373d100d957d990e7a28ade2922f9;hb=HEAD#l249). + +We’ll be using the kernel binfmt\_misc support to automatically run Aarch64 linux binaries with QEMU. As the configuration is system wide, we need to configure it on the host. On Debian based systems it is usually enough to install the “qemu-user” package which will ensure everything is set up for you. On other systems you can run the helper script in the QEMU source tree: + +```bash +$ sudo ./scripts/qemu-binfmt-conf.sh --qemu-path /usr/bin +Setting /usr/bin/qemu-alpha as binfmt interpreter for alpha +Setting /usr/bin/qemu-arm as binfmt interpreter for arm +... +Setting /usr/bin/qemu-aarch64 as binfmt interpreter for aarch64 +Setting /usr/bin/qemu-aarch64_be as binfmt interpreter for aarch64_be +.. +Setting /usr/bin/qemu-microblaze as binfmt interpreter for microblaze +Setting /usr/bin/qemu-microblazeel as binfmt interpreter for microblazeel +``` + +It is important that the interpreter for aarch64 binaries points at /usr/bin/qemu-aarch64 as this is where we will be installing the QEMU support binary in our docker images. + +## Building QEMU + +So the first thing we need to do is build a statically built version of the qemu-aarch64 binary. + +```bash +./configure --target-list=aarch64-linux-user --static +``` + +And then build: + +```bash +make -j8 +``` + +The -j option specifies how many units to compile at a time, typically you set it to the number of cores your system has. If you are running on a beefy server hardware, feel free to crank the number higher ;-) + +## Creating the base Docker Image + +The next thing we are going to do is create the base docker image. This will be a complete AArch64 rootfs with basic tools and the ability to install more software. In this case I’ve chosen Ubuntu’s Bionic Beaver but you can bootstrap any Debian based environment. Importantly, we will want to install gcc-8 as that is the first version of gcc that can compile SVE enabled binaries so in Debian you would want to install the rolling “testing” release. + +Fortunately all the details of the build are hidden behind QEMU’s build system. 
So we just need to execute this rather long make invocation:
+
+```bash
+make docker-binfmt-image-debian-ubuntu-bionic-arm64 \
+ DEB_ARCH=arm64 DEB_TYPE=bionic DEB_URL=http://ports.ubuntu.com \
+ EXECUTABLE=./aarch64-linux-user/qemu-aarch64 V=1
+```
+
+The V=1 will show you what’s going on under the hood as the bootstrapping process will take a while to complete. Once it has completed there should be an image in your local docker repository tagged qemu:debian-ubuntu-bionic-arm64. We can run it to verify that everything worked ok:
+
+```bash
+$ docker run --rm -it qemu:debian-ubuntu-bionic-arm64 /bin/bash
+root@e68be4cb7b0f:/# uname -a
+Linux e68be4cb7b0f 4.15.0-23-generic #25-Ubuntu SMP Wed May 23 18:02:16 UTC 2018 aarch64 aarch64 aarch64 GNU/Linux
+root@e68be4cb7b0f:/# exit
+```
+
+While uname reports the host kernel version, as far as the binaries are concerned they are running on an AArch64 machine. Another cool aspect of QEMU’s docker support is that it has automatically created a user in the container that is mapped to the current user on the host. This allows us to spin up a container process with the user’s privileges. This is useful when combined with docker’s [volume mounts](https://docs.docker.com/storage/volumes/) to allow access to the host file-system, as it means any files will be owned by the user and not the all powerful "root" user.
+
+```bash
+$ docker run --rm -it -u $(id -u) -v $(pwd):$(pwd) -w $(pwd) qemu:debian-ubuntu-bionic-arm64 /bin/bash
+alex.bennee@01cfe5adbbec:~/lsrc/qemu.git$ whoami
+alex.bennee
+```
+
+## Setting up our Development Environment
+
+Now that we have a working base system, we can use it to create a new Docker image with all the tools we need to experiment with SVE. Up until this point we have been running with the --rm flag, which tells docker to throw away any data inside the container when we are done. For the next set of steps we will run without the flag so we can save the final state of the container as a new image for later use. In this example we demonstrate this manually, but most docker image creation is scripted through Dockerfiles, which is good practice; see for example [QEMU’s aarch64 cross compiler image](https://git.qemu.org/?p=qemu.git;a=blob;f=tests/docker/dockerfiles/debian-arm64-cross.docker;h=877d863475ac81e4e2faf3e4198250ec5be820f4;hb=HEAD).
+
+So let’s get our image set up:
+
+```bash
+$ docker run -it qemu:debian-ubuntu-bionic-arm64 /bin/bash
+root@c4dc9b5426ad:/# sed -i 's/main/main universe/' /etc/apt/sources.list
+root@c4dc9b5426ad:/# apt update
+root@c4dc9b5426ad:/# apt install -y gcc-8 g++-8 wget libtool autoconf libtool gdb less
+root@c4dc9b5426ad:/# update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 1000
+root@c4dc9b5426ad:/# update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 1000
+root@c4dc9b5426ad:/# gcc --version
+gcc (Ubuntu 8-20180414-1ubuntu2) 8.0.1 20180414 (experimental) [trunk revision 259383]
+...
+root@c4dc9b5426ad:/# exit
+```
+
+We have done the following things:
+
+1. Added the universe repository (for gcc/g++-8)
+2. Updated the repo lists
+3. Installed gcc-8 and some other useful tools
+4. Updated the default gcc and g++ to version 8 and confirmed the switch
+
+We now want to save this container as an image we can re-use.
First we need to identify the container we have just run: + +```bash +$ docker ps -a +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c4dc9b5426ad qemu:debian-ubuntu-bionic-arm64 "/bin/bash" 2 hours ago Exited (0) 2 seconds ago adoring_goodall +e174632927ba 238519b386bc "/bin/sh" 5 hours ago Exited (0) 5 hours ago friendly_mayer +ad33c7bc7558 0da2cdd3455f "/bin/sh" 8 hours ago Exited (0) 8 hours ago silly_noether +... +``` + +The first entry on the list is the one we have justed exited, so we commit that as a new image: + +```bash +$ docker commit -m "setup arm64 env" adoring_goodall development:bionic-arm64-sve +sha256:25b770e5ce8a5b55ebbccad3b90b58a3474c4acdc5a70ca8ad42fdaf9f273f53 +``` + +The sha256 is the new image id, but we have tagged it as development:bionic-arm64-sve so we can use a friendly name in the future. + +## Taking it for a Spin + +Now we have a development environment, how do we use it? For this example I’m going to use the Cortex Strings library. This is a staging ground for various accelerated library functions using NEON, AdvSIMD and SVE instructions. We shall be building it with SVE enabled. + +First off all check out the git repository on the host system: + +```bash +$ git clone https://git.linaro.org/toolchain/cortex-strings.git cortex-strings.git +$ cd cortex-strings.git +``` + +We shall now start our development container as the user with our host path mounted: + +```bash +$ docker run --rm -it -u $(id -u) -v $(pwd):$(pwd) -w $(pwd) development:bionic-arm64-sve /bin/bash +``` + +The remaining steps are run inside our container: + +```console +user@container:~/cortex-strings.git $ ./autogen.sh +user@container:~/cortex-strings.git $ ./configure --with-sve --enable-static --disable-shared +user@container:~/cortex-strings.git $ make -j +user@container:~/cortex-strings.git $ make check -j +``` + +At the end you should be presented with the result of the self-test: + +```console +PASS: tests/test-memset +PASS: tests/test-strnlen +PASS: tests/test-strlen +PASS: tests/test-memchr +PASS: tests/test-strcpy +PASS: tests/test-strchr +PASS: tests/test-memmove +PASS: tests/test-strrchr +PASS: tests/test-memcmp +PASS: tests/test-memcpy +PASS: tests/test-strncmp +PASS: tests/test-strcmp +====================================================================== +Testsuite summary for cortex-strings 1.1-2012.06~dev +====================================================================== +# TOTAL: 12 +# PASS: 12 +# SKIP: 0 +# XFAIL: 0 +# FAIL: 0 +# XPASS: 0 +# ERROR: 0 +====================================================================== +``` + +Not sure we have just run SVE enabled code? Let’s examine it with gdb: + +```console +user@container:~/cortex-string.git $ gdb tests/test-strcpy +GNU gdb (Ubuntu 8.1-0ubuntu3) 8.1.0.20180409-git +... +Reading symbols from tests/test-strcpy...done. 
+(gdb) disassemble strcpy +Dump of assembler code for function strcpy: + 0x00000000000023c0 <+0>: setffr + 0x00000000000023c4 <+4>: ptrue p2.b + 0x00000000000023c8 <+8>: mov x2, #0x0 // #0 + 0x00000000000023cc <+12>: nop + 0x00000000000023d0 <+16>: ldff1b {z0.b}, p2/z, [x1, x2] + 0x00000000000023d4 <+20>: rdffrs p0.b, p2/z + 0x00000000000023d8 <+24>: b.cs 0x23f0 // b.hs, b.nlast + 0x00000000000023dc <+28>: cmpeq p1.b, p2/z, z0.b, #0 + 0x00000000000023e0 <+32>: b.ne 0x2408 // b.any + 0x00000000000023e4 <+36>: st1b {z0.b}, p2, [x0, x2] + 0x00000000000023e8 <+40>: incb x2 + 0x00000000000023ec <+44>: b 0x23d0 + 0x00000000000023f0 <+48>: cmpeq p1.b, p0/z, z0.b, #0 + 0x00000000000023f4 <+52>: b.ne 0x2408 // b.any + 0x00000000000023f8 <+56>: setffr + 0x00000000000023fc <+60>: st1b {z0.b}, p0, [x0, x2] + 0x0000000000002400 <+64>: incp x2, p0.b + 0x0000000000002404 <+68>: b 0x23d0 + 0x0000000000002408 <+72>: brka p0.b, p2/z, p1.b + 0x000000000000240c <+76>: st1b {z0.b}, p0, [x0, x2] + 0x0000000000002410 <+80>: ret +End of assembler dump. +(gdb) +``` + +And there you have it. You can see predicate instructions `ptrue`, vector loads `ldff1b {z0.b}` and resetting of the first fault register `setffr`. diff --git a/src/content/blogs/system-on-module-specifications~.mdx b/src/content/blogs/system-on-module-specifications~.mdx new file mode 100644 index 0000000..a643c2b --- /dev/null +++ b/src/content/blogs/system-on-module-specifications~.mdx @@ -0,0 +1,32 @@ +--- +author: linaro +date: 2017-12-22T12:00:00.000Z +description: 96Boards steering committee are working on new System-on-Module + (SOM) specifications and they are inviting interested parties to participate + in the finalization of the specifications. +link: /news/system-on-module-specifications/ +title: Linaro Invites Input into 96Boards System-on-Module (SOM) specifications +tags: [] +related: [] + +--- + +\[Cambridge UK, 22 December 2017] Linaro Ltd, the [open source collaborative engineering organization](/) developing software for the Arm® ecosystem, today announced that members of the [96Boards steering committee](https://www.96boards.org/about/) are working on new **System-on-Module (SOM) specifications** and they are inviting interested parties to participate in the finalization of the specifications. + +96Boards is Linaro’s initiative to build a single software and hardware community across [low-cost development boards based on Arm technology](https://www.96boards.org/). Since 96Boards was announced in 2015, members and other companies have been looking at ways to effectively mass produce prototypes created with 96Boards platforms. Some member companies have worked with third parties to develop proprietary SOM solutions and these have sold in high volume, but without leveraging all the software and hardware work done on the prototype 96Boards-based prototype and with no compatibility across SoCs. + +SOM solutions today use a variety of different connector solutions including SO-DIMM connectors used in DRAM and Mini Module Plus (MMP) connectors for certain specialist boards, but there is no agreed standard that provides flexible IO and a robust mounting mechanism. In addition, there is no standard form factor. The new 96Boards SOM specifications aim to address this lack of a standard by producing a general purpose SOM platform that will enable plug and play compatibility between a whole range of different SOM solutions. 
+ +There are currently two SOM specifications under development: + +* The Compute Module Specification defines a SOM with generic module-to-carrier board interface, independent of the specific SoC choice on the module. The Compute module addresses the application requirements of segments including industrial automation, smart devices, gateway systems, automotive, medical, robotics and retail POS systems. +* The Wireless specification designs a SOM for interchangeable wireless module applications supporting standard and/or proprietary wireless standards such as 802.15.4, BLE, WiFi, LoRa, NB-IoT, LTE-M etc. The specification is designed to enable evolution that will support multiple products and future wireless standards. + +Both specifications encourage the development of reliable and cost-effective embedded platforms for building end-products. + +The initial drafts of the specifications have been created by the current steering committee members and reviewing began today. The 96Boards steering committee includes SoC, SOM, and cloud and software service companies. Companies interested in providing input into the current version of the specifications and/or becoming a part of the steering committee are invited to contact 96Boards@Linaro.org. The final specification will be launched at Linaro Connect in Hong Kong on 19 March 2018. + +**About Linaro** +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96Boards.org). diff --git a/src/content/blogs/tee-development-with-no-hardware-is-that-possible.mdx b/src/content/blogs/tee-development-with-no-hardware-is-that-possible.mdx new file mode 100644 index 0000000..6356a1e --- /dev/null +++ b/src/content/blogs/tee-development-with-no-hardware-is-that-possible.mdx @@ -0,0 +1,39 @@ +--- +excerpt: Read about System Trace Module (STM) which can not only collect trace + data from software sources, but also monitor hardware events. Learn how to + write traces to STM and how many approaches to do this, etc. +title: TEE Development With No Hardware - Is That Possible? +description: In this article, Joakim Bech questions whether you actually need + hardware for TEE development. Read more on his findings here! +image: linaro-website/images/blog/Banner_Security +author: joakim-bech +date: 2016-11-28T17:06:06.000Z +tags: + - linux-kernel + - qemu +link: /blog/tee-development-with-no-hardware-is-that-possible/ +related: [] + +--- + +![lightbox\_disabled=True Core Dump Banner](/linaro-website/images/blog/core-dump) + +It is a well-known fact that it has been hard to get started with TEE development for a couple of reasons. 
For example, it has been hard to get access to the software because in the past TEE software has typically been proprietary and therefore kept within the company or under a non-disclosure agreement. On the hardware side it hasn’t been much better, and even today it is still hard to find hardware readily available for TEE development, at least if you intend to make a completely secure product. So wouldn’t it be great if we could emulate it all on a local desktop? The question is whether you actually need hardware for TEE development. As it turns out, QEMU, the machine emulator that can emulate a multitude of CPUs, officially received TrustZone support at the beginning of this year and QEMU currently supports TrustZone on both Armv7-A and Armv8-A architecture. But just the support in QEMU isn’t enough: you will still need the software for the TEE. + +A couple of years ago Linaro, together with STMicroelectronics, teamed up and reworked STMicroelectronics proprietary TEE. That work resulted in an Open Source TEE called OP-TEE. That project has been hosted on GitHub ([https://github.com/OP-TEE](https://github.com/OP-TEE)) since the summer of 2014. Initially it came with board support for devices coming from STMicroelectronics. But since then members of Linaro and other companies have started to use OP-TEE and today there are roughly twenty different platforms officially supported by OP-TEE. Quite early into the development of OP-TEE, Linaro added support for running OP-TEE on QEMU (Armv7-A). Back then there were no TrustZone patches in upstream QEMU. Because of that, we were running on a fork of QEMU for quite a while. The fork contained a set of TrustZone patches that later on went into the official QEMU tree. A major reason why we did the port at an early stage was simply due to the fact that it was hard to obtain hardware back then. We could use the board from STMicroelectronics but, although it was a good development board, it had a form factor that made it hard to bring it with you, and was (and still is) hardware that is not publicly available. Today things look much better. People can choose between a variety of devices, like HiKey from Hisilicon, Raspberry Pi 3 and some devices from Freescale and TI and Xilinx. Still, with the ability to use real hardware, we haven’t let QEMU go away. + +So why on earth would you want to write code to be used in a secure environment in something that isn’t secure? As it turns out, it is very convenient to use QEMU for quite a lot of the work we are doing and the turnaround time is kept to a minimum. No cables to plug and pull, no memory cards to update, no mmc to flash and, as a bonus, the GDB debugger works without the need for any modifications. All in all, you have all the tools you need running on a single computer and it doesn’t cost you anything! So how does it compare with running the code on real hardware? What is the main difference and what about the interfaces? Let’s first consider the OP-TEE boot. The Armv8-A QEMU setup reminds more of a true boot scenario compared to Armv7-A on QEMU. But in both cases, when running QEMU with OP-TEE, secure boot is not enabled. Having said that, there is nothing really preventing you from implementing a chain of trust, although since the boot flow is a bit different compared to other devices it doesn’t make much sense to spend time on implementing chain of trust, since it will most likely be something that won’t be used in other setups. + +When the system is up and running it is another situation. 
There the system behaves more or less in the same way as running on real hardware. The biggest limitation of using QEMU is that some peripherals might not be emulated and some low level functionality might not be fully supported (CPU caches etc). But for developing and debugging the core of the TEE (kernel mode in secure world) you can do almost everything and, most of the time, the changes you have made will work when you compile for another platform and try it out on real hardware.
+
+What about the Trusted Applications? If you have some prior knowledge about the GlobalPlatform Internal Core API specification, you know that this specification specifies how to deal with cryptographic operations, secure storage, secure time and how to work with arithmetic (big number) operations. So the question is: do the Trusted Applications need some special treatment to work with QEMU? The short answer is no. Everything but secure time works just fine, and secure time is heavily platform dependent regardless of whether you’re running QEMU or not. The cryptographic operations run using a software implementation and secure storage uses whatever root file system you have decided to use. We typically use an initramfs-based file system. Everything is transparent to the one writing the Trusted Application. The important thing is that Trusted Applications being developed for use with OP-TEE should be written so they use and follow the rules in the API specified by the GlobalPlatform TEE Internal Core API specification. By doing so, there is no need to make any changes to the source code at all when building for different platforms and devices. In fact, by following a standard strictly, I believe it is possible to take the source code as it is and compile it using another TEE vendor’s SDKs and development kits without having to make many changes at all. A missing piece, but a great step for the future, would be to create Trusted Applications that are binary compatible so you wouldn’t have to recompile for different platforms or even when running on a different TEE solution. Unfortunately, I think we are a bit far away from that now.
+
+As mentioned, GDB just works. QEMU provides a GDB stub (the -s parameter), which means that you not only have the ability to develop and test the solution, but that you can also debug the entire solution, i.e., Linux kernel, TEE core and the Trusted Applications. By default the debugger is text based, but there are some quite decent graphical interfaces available if someone prefers using that instead. From a debugger point of view everything works as expected: you can set breakpoints, examine variables, memory etc. This is a very powerful tool and can save a lot of time and headaches both when trying to find bugs and when studying the code. Again, this doesn’t cost you anything.
+
+The 2016 developer workshop, hosted by GlobalPlatform in Santa Clara, covered everything discussed above: attendees learned how to write code and debug the TEE core itself, and how to write, deploy and test a Trusted Application using QEMU and OP-TEE. The takeaway was that using a local setup as described here is a good way for people new to TEE development to get started, and to make experienced users’ lives a bit easier when they are working with real products, by simplifying the setup and minimizing the turnaround time.
Most likely the majority of their development could be done by using QEMU and then finalizing the remaining bits and pieces on the target hardware.
+
+For users who did not attend the workshop, but still would like to try this, we recommend that you head over to [https://github.com/OP-TEE/optee\_os](https://github.com/OP-TEE/optee_os) and read through the README.md file (prerequisites in section 4 and QEMU in section 5 should be sufficient to get a working setup by running a handful of commands in a Linux shell).
+
+***
+
+*This blog was originally posted on the TeeSeminar.org site: http://www.teeseminar.org/media\_center\_blog\_jbech.asp*
diff --git a/src/content/blogs/testing-a-trusted-execution-environment.mdx b/src/content/blogs/testing-a-trusted-execution-environment.mdx
new file mode 100644
index 0000000..ed1c4ce
--- /dev/null
+++ b/src/content/blogs/testing-a-trusted-execution-environment.mdx
@@ -0,0 +1,117 @@
+---
+title: "OP-TEE Test: Testing a Trusted Execution Environment"
+description: In this article, Joakim Bech provides a general background about
+  OP-TEE as well as testing OP-TEE using a tool called xtest (optee_test). Read
+  more here!
+image: linaro-website/images/blog/Linaro-and-Riscure-release-banner
+tags: []
+author: joakim-bech
+date: 2016-02-10T13:54:07.000Z
+link: /blog/core-dump/testing-a-trusted-execution-environment/
+related: []
+
+---
+
+![lightbox\_disabled=True Core Dump Banner url=https://wiki-archive.linaro.org/CoreDevelopment](/linaro-website/images/blog/core-dump)
+
+* Background
+* Linaro gets into the picture
+* Software components
+* Host application
+* Test Trusted Applications
+* What about the tests coming from GlobalPlatform?
+* Licenses
+* Shortcomings and future improvements
+* Final words
+
+Why you need to test your software is quite obvious, and therefore this blog post will not be about that; instead I’ll talk a little bit about how we are testing [OP-TEE](https://github.com/OP-TEE) using a tool called xtest ([optee\_test](https://github.com/OP-TEE/optee_test)). I will also talk about what components are involved, what kind of tests are performed, what is missing, etc. But first let’s start with a short background.
+
+# Background
+
+Just like the other components in OP-TEE, the test framework has its origins at ST-Ericsson and STMicroelectronics. A couple of years ago, when OP-TEE was being developed, the developers were engaged in GlobalPlatform testing, in the so-called TestFests (for simplicity let’s call it OP-TEE even though that strictly isn’t correct, since back then the TEE solution didn’t really have a name; it was the ST-Ericsson TEE solution). At this time there was no official test suite or compliance program ready, and the goals of the TestFests were twofold: first, to ensure that the different TEE vendors’ respective TEE solutions behaved according to the specification; secondly, that the test tool(s) and the specifications themselves were correct. At the same time as this work took place there were quite a few “standalone” test cases being implemented as a complement to the GlobalPlatform tests. The nature of those was more to address the missing pieces in GlobalPlatform and to test corner cases, hardware features and extended features. So, side by side, the engineers at ST-Ericsson were running their own tests as well as the tests provided by the ones in charge of the GlobalPlatform compliance program.
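+
+Before looking at the individual pieces, it may help to see roughly how the suite is driven in practice. The sketch below is only illustrative (the test ID and level shown are made-up examples, and the exact option names may differ between versions of optee\_test); Figure 1 below shows the kind of output such a run produces:
+
+```bash
+# On the target device, with the optee_test binaries installed in the root file system
+$ xtest           # run the full standard test suite
+$ xtest 4001      # run a single test case by its numeric ID (illustrative ID)
+$ xtest -l 15     # raise the test level to also include the slower, more exhaustive cases
+```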
+
+![figure1](/linaro-website/images/blog/figure1)
+
+**Figure 1: Output from xtest**
+
+# Linaro gets into the picture
+
+When Linaro got involved in the development of OP-TEE we also had a need to test the code we were developing, and at the same time we wanted to give our members the ability to use a suitable test framework. The only problem was that the test cases coming from GlobalPlatform couldn’t be shared with anyone (including Linaro), since to get access to those, a company either had to be a member of GlobalPlatform or it had to purchase the needed files directly from GlobalPlatform. Therefore the engineers from ST immediately started working on separating the tests implemented by themselves from the ones that they had gotten from GlobalPlatform. When that job was completed, they shared the tests they had developed themselves with Linaro engineers and the members of Linaro. This piece of test code is what we today refer to as “**the standard test**”, and it is what you have been able to find on GitHub for a couple of months now, in the OP-TEE project’s git called [optee\_test](https://github.com/OP-TEE/optee_test). That repository **is no longer private to** Linaro and its members.
+
+For OP-TEE development we have configured our repositories at GitHub so that a pull request will trigger a [Travis](https://travis-ci.org/OP-TEE) job, which in turn automatically triggers builds for all supported platforms. In addition to that, we will always automatically run xtest using QEMU ([here](https://travis-ci.org/OP-TEE/optee_os/builds/102096254#L4514-L4526) is an example of what that can look like). In the long run we would like to also start using our own Linaro infrastructure (Jenkins + LAVA) as a complement to Travis, so that we could do automatic testing on all the devices we are supporting in OP-TEE.
+
+# Software components
+
+The test framework consists of a [host application](https://github.com/OP-TEE/optee_test/tree/master/host/xtest), which is a normal user space application running in Linux. This is the piece of software that initiates and runs the actual tests and gathers test results etc. When it comes to the Linux kernel there are no changes at all. It’s still the same TEE driver in use that is responsible for transporting the data back and forth between normal world, user space and secure world. Likewise on the secure side, there are no changes to the secure OS itself (TEE core). Instead, all the code specific to testing is implemented as a set of different Trusted Applications (I’ll go into more detail further down in this blog post).
+
+## Host application
+
+The host application, which by the way is the one we call “xtest”, has been divided into a couple of different files where each file corresponds to a certain area or feature. As of writing this, you will find the following files for the host application (there are a few more files, but those other files are the application and test framework itself):
+
+* [xtest\_1000.c](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/benchmark_1000.c): contains the **OS related** tests – basic OS features, panics, wait functionality, RPC messaging, and signature header verification (by loading a fake and a corrupt Trusted Application). It also tests invalid memory access and concurrent usage of Trusted Applications.
+
+* [xtest\_4000.c](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/regression_4000.c): contains all **crypto** related testing.
It is basically testing crypto APIs that are exposed to the Trusted Application via the GlobalPlatform Internal TEE core specification.
+
+* [xtest\_5000.c](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/regression_5000.c): this file has tests for **shared memory** handling.
+
+* [xtest\_6000.c](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/regression_6000.c): contains tests for **storage**, which exercise the GlobalPlatform secure storage API as well as the underlying “POSIX” file system API.
+
+* [xtest\_10000.c](https://github.com/OP-TEE/optee_test/tree/master/host/xtest): has test code containing **extensions** going beyond the GlobalPlatform specifications. For example, this is where we are testing key derivation functionality like PBKDF2, HKDF and Concat KDF.
+
+* [xtest\_20000.c](https://github.com/OP-TEE/optee_test/tree/master/host/xtest): this file also has tests related to storage, but this time those are more aimed at the **secure storage** implementation as such, and they verify that files are actually being written to the file system, checking that they haven’t been corrupted and that they are being deleted etc. As an example, when initiating a store operation from secure world there should be file(s) created in Linux and accessible at ***/data/tee/{directory}/{filename}/block.xxx***.
+
+* [xtest\_benchmark\_1000.c](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/benchmark_1000.c): This is so far the only file related to **benchmarking** and it contains a couple of benchmark tests for the **secure storage** implementation.
+
+The main function can be found in the file [xtest\_main.c](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/xtest_main.c). This file basically just lists all test cases that should be enabled, and parses a few command line arguments, followed by starting the actual tests. If you dive into the test code itself, you will see that the test framework implements macros that are used to evaluate whether a test has passed or failed. You will, for example, frequently see ADBG\_EXPECT\_TEEC\_SUCCESS, ADBG\_EXPECT, ADBG\_EXPECT\_TEEC\_ERROR\_ORIGIN, ADBG\_EXPECT\_TRUE and ADBG\_EXPECT\_TEEC\_RESULT everywhere in the test code. There are others, but those are the most commonly used.
+
+At the top level, a test case is added using the macro ADBG\_CASE\_DEFINE, and that is what you can see at the top of each and every file listed above. As arguments, this macro takes a test label, a function pointer, a title, a short description of what it **is** testing, a requirement ID and a short description of **how** it will be tested. As an example, have a look at *XTEST\_TEE\_10001*, which is defined [here](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/). As you can see, this particular test is supposed to test functionality related to key derivation.
+
+Within each test you can define sub-tests, and to do so you have to wrap your code between Do\_ADBG\_BeginSubCase() and Do\_ADBG\_EndSubCase() calls. This isn’t something you strictly need to do, but it is a nice way of splitting up the tests into manageable sections, which will help pinpoint where something went wrong in case of a test case failure. The number of tests and subtests is also something that will be presented when all test cases have been run (see Figure 3 further down).
+
+## Test Trusted Applications
+
+As I’ve mentioned above, all code related to testing can be found within a set of Trusted Applications.
Below is a list of the Trusted Applications that are used by xtest.
+
+* [**concurrent**](https://github.com/OP-TEE/optee_test/tree/master/ta/concurrent): The concurrent Trusted Application is responsible for testing the ability to run several Trusted Applications simultaneously – a feature that has been [merged](https://github.com/OP-TEE/optee_os/pull/536/commits) into OP-TEE quite recently. For the host application you will find this application’s code in the [xtest\_1000.c](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/regression_1000.c#L156-L165) file.
+
+* [**create\_fail\_test**](https://github.com/OP-TEE/optee_test/tree/master/ta/create_fail_test): This is a tiny little TA used solely to test OP-TEE’s behaviour when loading a corrupt or fake Trusted Application.
+
+* [**crypt**](https://github.com/OP-TEE/optee_test/tree/master/ta/crypt): Although there is the crypto API defined by GlobalPlatform, in OP-TEE this particular Trusted Application also contains an AES-ECB and a SHA-256 (224) implementation within the TA itself; that is mostly due to historic reasons. But the majority of the entry points are calling GlobalPlatform Internal API functions. This Trusted Application tests MAC, AEAD, hashes, ciphers, the random number generator etc.
+
+* [**os\_test**](https://github.com/OP-TEE/optee_test/tree/master/ta/os_test): Mainly tests OS related features such as memory access rights, properties, the time API and floating point operations as well as the MPA library (implementing big numbers).
+
+* [**rpc\_test**](https://github.com/OP-TEE/optee_test/tree/master/ta/rpc_test): Tests that the RPC mechanism and loading of other Trusted Applications are working properly. It does this by letting the TA itself call functionality in the crypt TA, which will trigger loading of the crypt TA using RPC messages.
+
+* [**sims**](https://github.com/OP-TEE/optee_test/tree/master/ta/sims): Tests the Single Instance and Multiple Session features specified by GlobalPlatform.
+
+* [**storage**](https://github.com/OP-TEE/optee_test/tree/master/ta/storage): Contains tests related to the (secure) storage functionality. It tests all the functions of the GlobalPlatform specification that cover the so-called “Persistent Objects”. On a high level, or in Unix terms, this can be seen as the POSIX API (in reality there is a POSIX level behind the GP interfaces).
+
+* [**storage\_benchmark**](https://github.com/OP-TEE/optee_test/tree/master/ta/storage_benchmark): As the name indicates, this TA benchmarks storage operations. It reads and writes data of various chunk sizes and then in the end creates a performance report.
+
+# What about the tests coming from GlobalPlatform?
+
+The compliance test suite ([GlobalPlatform TEE Initial Configuration Compliance Test Suite v1.1.0.4](https://globalplatform.org/)) that can be purchased from GlobalPlatform (free for GP members) consists of a *compliance adaptation layer specification* that needs to be implemented to run the tests. It also contains a set of configuration files, more specifically XML files specifying how functions should be called, what parameters to pass to them and what kind of test results to expect, i.e., you will **not** get any actual code that is ready to be compiled. How those XML files end up being used is up to the end user. What we did early on was to configure xtest so that it would be easy to extend it later to also include the compliance test suite from GlobalPlatform.
So by putting the XML files on a certain [path](https://github.com/OP-TEE/optee_test#extended-test-global-platform-tests), using the adaptation layer, installing a couple of tools ([xalan](https://xalan.apache.org)) and running make with “patch” as an argument, a set of new Trusted Applications will be generated and xtest itself will be patched to also include the compliance tests. I.e., the XML files will be transformed into C code in this step. After performing that step you will not only run the so-called standard test, but you will also run the compliance tests from GP in the same run.
+
+![figure2](/linaro-website/images/blog/figure2)
+
+**Figure 2: xtest overview**
+
+# Licenses
+
+One has to be careful when working with xtest, since there are different licenses in use in different areas. In general we usually use the BSD 2-Clause license for most of our code. But in this case, when it comes to test related code, we’re using both the BSD 2-Clause and the GPLv2 license. All code running on the secure side in the standard tests (Trusted Applications) uses the BSD 2-Clause license, while the code running in normal world uses the GPLv2 license. The same is true for the code used when extending xtest; however, we must also follow the license stated by GlobalPlatform (GlobalPlatform Compliance License Agreement). In Figure 2 above, you can see more clearly how xtest is divided and what licenses are in use.
+
+# Shortcomings and future improvements
+
+Today xtest is a test framework that does API testing of the exposed functionality for the Client API and for the Internal Core API. It contains quite a few test cases. Running the standard test on QEMU (Intel Core i5-4670K CPU @ 3.40GHz) results in the following:
+
+![figure3](/linaro-website/images/blog/figure3)
+
+**Figure 3: xtest standard test result**
+
+If you also enable the GP compliance tests, then you get even better coverage. So the APIs as such are being thoroughly tested and that is all good. However! Since it is security we’re dealing with here, we still have a lot to do when it comes to performing focused security testing. There exist concepts, tools and even companies solely dedicated to white box testing, where the goal is to find bugs and potential vulnerabilities in the code. For example, over the years people have found numerous bugs in the Linux kernel by using Trinity (a [fuzz tester](https://en.wikipedia.org/wiki/Fuzz_testing)). With Trinity the main goal isn’t strictly to enhance security but rather to ensure that the system calls in the Linux kernel are robust. A crash (a [Linux kernel oops](https://en.wikipedia.org/wiki/Linux_kernel_oops)) can in some cases also be an entrance point for a kernel exploit, and therefore it is still important to find and fix issues discovered by tools such as Trinity. Having something similar running on the secure side would probably be really useful. We have heard that GlobalPlatform will include fuzz testing in a new test suite that is currently being developed (a draft is available for GP members here: [TEE Security Test Suite v0.1.0](https://globalplatform.org/)).
+
+There are also [side channel attacks](https://en.wikipedia.org/wiki/Side-channel_attack). Some side channel attacks, like power analysis, cannot be done in software only, but it would still be worth adding tests covering such cases when possible.
For example, [timing attacks](https://en.wikipedia.org/wiki/Timing_attack) are something one can do using only software, and having test cases automatically performing timing attacks would be very useful. Since we mainly use Arm TrustZone™ it would also be worth adding tests covering the boundaries between the two worlds, i.e., tests that ensure that memory is or isn’t accessible from the other side. There are some memory region tests in xtest already today, but it would be great to add more tests in this area. With some imagination one could also start to play with the [TrustZone Address Space Controller](https://www.arm.com/products/silicon-ip-security) and add tests that ensure that the configuration of that system IP behaves as expected.
+
+# Final words
+
+I hope this post gave a useful introduction to xtest and explained how we are testing OP-TEE. The xtest sources are also a good place to look if you want to know more about how to write Trusted Applications and how to use the GlobalPlatform APIs. We are continuously adding tests and hopefully sooner rather than later we will also address the shortcomings mentioned above. But since most of it is open source and thereby freely available, we would be more than happy to see people with experience in this area get involved by giving feedback, coming up with ideas and maybe even submitting patches that improve xtest.
diff --git a/src/content/blogs/the-need-for-linaro.mdx b/src/content/blogs/the-need-for-linaro.mdx
new file mode 100644
index 0000000..baa9310
--- /dev/null
+++ b/src/content/blogs/the-need-for-linaro.mdx
@@ -0,0 +1,21 @@
+---
+author: david-rusling
+date: 2010-06-02T14:00:00.000Z
+link: /blog/industry-blog/the-need-for-linaro/
+title: The need for Linaro
+tags:
+  - arm
+  - linux-kernel
+  - open-source
+related: []
+description: At Linaro we want to be different from other Industry led initiatives.  We want to take the Arm community's embedded engineers and work directly with the various open source projects helping in the best possible way.  By writing code.  We also believe strongly in being open; that is why all of our plans and code are all on-line.
+
+---
+
+Arm® is not so well known outside of its partnership. Its business model is to collaborate with its partners, helping to create low-power embedded systems. Arm processors are in many devices that today we take for granted, from web-surfing mobile phones and tablet computers to high-definition televisions. As part of creating processor designs, we have used open source software, including GNU tools and Linux®: firstly to help design features, secondly to help prove that we've got the design right and thirdly to enable our partners and their customers to create great products. Over the years we've learned, sometimes painfully, how to work with the open source community to ensure that our technology is well understood and supported. It is not very well known, but Arm donates to many, many open source projects.
+
+Over time, Linux has become more and more the basis for products and so has been adopted by a great proliferation of Arm-based products. This innovation is good; we're living in the future, using devices that would have seemed like science fiction 20 or 30 years ago. Less positive is that this variance in platforms can create fragmentation in code bases, slowing down the very innovation that we're so rightfully proud of. This is where Linaro comes in.
Linaro™ is a collaboration vehicle for Arm and its partners to work cooperatively with the various open source communities, adding engineers and hardware. + +At Linaro we want to be different from other Industry led initiatives.  We want to take the Arm community's embedded engineers and work directly with the various open source projects helping in the best possible way.  By writing code.  We also believe strongly in being open; that is why all of our plans and code are all on-line. + +For me, it's been a really wild ride setting this organization up.   I look forward to more months and years of excitement and achievement. diff --git a/src/content/blogs/thundersoft-joins-linaro-96boards-manufacturing-partner-steering-committee-member.mdx b/src/content/blogs/thundersoft-joins-linaro-96boards-manufacturing-partner-steering-committee-member.mdx new file mode 100644 index 0000000..a7124cf --- /dev/null +++ b/src/content/blogs/thundersoft-joins-linaro-96boards-manufacturing-partner-steering-committee-member.mdx @@ -0,0 +1,44 @@ +--- +excerpt: Linaro announces that Thundersoft has joined the 96Boards initiative as + a Steering Committee member and Manufacturing Partner. This new collaboration + allows Thundersoft to both influence the development of the 96Boards + specifications and initiative, and produce 96Boards products with support + provided on the 96Boards forum. +title: Thundersoft joins Linaro 96Boards as both Manufacturing Partner and + Steering Committee member +description: Linaro announces that Thundersoft has joined the 96Boards + initiative as a Steering Committee member and Manufacturing Partner. This new + collaboration allows Thundersoft to both influence the development of the + 96Boards specifications and initiative, and produce 96Boards products with + support provided on the 96Boards forum. +image: linaro-website/images/blog/96boards-specification-consumer-edition-v2 +author: linaro +date: 2016-06-15T10:58:50.000Z +tags: + - open-source +link: /news/thundersoft-joins-linaro-96boards-manufacturing-partner-steering-committee-member/ +related: [] + +--- + +Cambridge, UK: 15 June 2016 + +Linaro Ltd, the collaborative engineering organization developing open source software for the Arm® architecture, announced today that Thundersoft has joined the 96Boards initiative as a Steering Committee member and Manufacturing Partner. This new collaboration allows Thundersoft to both influence the development of the 96Boards specifications and initiative, and produce 96Boards products with support provided on the 96Boards forum. + +96Boards is Linaro’s initiative to build a single software and hardware community across low-cost development boards based on Arm technology. Thundersoft is a leading Smart Device Platform Technology Provider specialized in the mobile OS segment. Thundersoft will be able to leverage 96Boards to provide its services to a broader market with more rapidly developed and easily maintained solutions benefiting from the platform standardization provided by the initiative. + +“Historically, there has been no affordable way for developers to work on the latest Armv8 64-bit hardware,” said Larry Geng, CEO of Thundersoft. “The 96Boards initiative has helped to provide the Arm ecosystem with a standardized, silicon-independent platform that is opening up new markets by delivering the latest Arm technology at a reasonable cost to a much wider audience. 
We expect that by helping develop the 96Boards initiative, we will encourage a broader range of engineering talent to develop on Arm, which will in turn lead to accelerated innovation and faster time to market for new products that will benefit from Thundersoft services.” + +The 96Boards steering committee now includes ten companies who are working together on Consumer, Enterprise, Digital Home, Networking and IoT specifications. To date, the Consumer and Enterprise Edition specifications have been released with Consumer edition boards readily available and the first Enterprise edition board currently in production and expected to ship in the next month. In total, there are now over twenty main boards being sold or under development with a large number of mezzanine products and other accessories being released. + +“Thundersoft joining as a Steering Committee member and Manufacturer Partner is a big step forward in the maturing of the 96Boards initiative,” said Yang Zhang, Director of 96Boards. “Thundersoft is well positioned to provide a software service to our existing members engaged in 96Boards and their customers who are developing next generation solutions based on the 96Boards specifications. We look forward to the initiative fostering increased cooperation opportunities for Thundersoft in both hardware and software development.” + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 200 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [http://www.96Boards.org](https://www.96boards.org/) + +**About Thundersoft** + +Thundersoft, listed on the Shenzhen Stock Exchange (SZ: 300496), is the world's leading provider of mobile operating system and smart device solutions. Focusing in areas like mobile phone/tablet, IoT, automotive and enterprise, etc., with full-stack engineering resource covering mobile software, hardware and all layers of operating system, rich technology/solution portfolio, strategic partnership with key SoC/ISV/IHV vendors, and global support networks, Thundersoft is able to provide professional, solid, responsive and turn-key mobile platform technology, services and solutions, and enable customers worldwide to build high-quality and quick-to-market products. 
Learn more at [www.thundersoft.com](http://www.thundersoft.com) diff --git a/src/content/blogs/tier-IV-joins-96boards-steering-committee.mdx b/src/content/blogs/tier-IV-joins-96boards-steering-committee.mdx new file mode 100644 index 0000000..c11c79f --- /dev/null +++ b/src/content/blogs/tier-IV-joins-96boards-steering-committee.mdx @@ -0,0 +1,38 @@ +--- +title: Tier IV joins Linaro 96Boards Steering Committee +author: linaro +date: 2018-06-20T08:00:00.000Z +description: Linaro Ltd, the open source collaborative engineering organization + developing software for the Arm® ecosystem, announced today that Japan-based + intelligent vehicle technology company Tier IV, Inc. has joined the 96Boards + initiative as a Steering Committee member. +image: linaro-website/images/blog/96boards-home-page-latest +tags: + - arm + - linux-kernel + - open-source +related: [] + +--- + +# Tier IV joins Linaro 96Boards Steering Committee + +\[Cambridge, UK; 20 March 2018] Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, announced today that Japan-based intelligent vehicle technology company Tier IV, Inc. has joined the 96Boards initiative as a Steering Committee member. Tier IV owns Autoware, ROS-based open source software, enabling self-driving mobility to be deployed in open city areas. This new collaboration provides Tier IV with standardized hardware on which it can extend its Autoware based offerings to support multiple SoC platforms. The 96Boards Steering Committee provides a neutral forum in which Tier IV can cooperate with other 96Boards partners to accelerate the development of the 96Boards specifications and new products based on Autoware. + +96Boards is Linaro’s initiative to build a single worldwide software and hardware community across low-cost development boards based on Arm technology. A large range of products compliant with the 96Boards specifications are already available worldwide and this range is supplemented with additional hardware functionality provided through standardized mezzanine boards. Linaro plans to work with Tier IV and other companies in the automotive industry on building and optimizing Autoware on Arm. 96Boards will help define the standardized hardware platforms on which to maintain and grow the Autoware code base and extend support for it across a broader range of SoC solutions with consistent support. + +“Autoware is the world’s first complete stack of open-source software for fully autonomous driving. It is not just a software platform, but a core system for the ecosystem of autonomous driving technology. The development of Autoware has already made a significant contribution to the practice of emerging autonomous vehicles all over the world. We are proud of Autoware being used by so many, and a very wide variety of, companies, universities, and research institutes. We are happy to support as many computers, sensors, and vehicles as possible.” said Shinpei Kato, Associate Professor at The University of Tokyo and the Founder and CTO of Tier IV. “The current version of Autoware is mostly used for R\&D purposes. With Linaro’s participation, we are now aiming Autoware to be more production- and service-oriented. In particular, we will enable Autoware support on 96Boards, which now supports a full range of the major Arm platforms. 
The collaboration of Autoware and 96Boards will be the next core of the complete production-quality platform for all Arm SoC vendors, solution providers, and individual developers in the autonomous driving market.” + +The 96Boards steering committee now includes more than twenty companies who are working together on Consumer, Enterprise, TV Platform, Networking, IoT and SOM specifications. To date, the Consumer, Enterprise and TV Platform and IoT specifications have been released with boards available for each. In addition there are a large number of mezzanine products and other accessories available for a range of applications from industrial control and robotics, through AI, HPC and IoT, on to data centers and edge applications. + +“96Boards products are used in many application spaces as development platforms. There are opportunities for 96Boards in many aspects of automotive development, including pre-production, IVI, ADAS, AD and VCU,” said Yang Zhang, Director of 96Boards. “Tier IV and the Autoware open source platform is one of the most widely used ADAS/AD solutions today. As part of the 96Boards Steering Committee, we will be collaborating on the accelerated adoption of a choice of Arm-based solutions in the automotive space.” + +### About Tier IV + +Tier IV is an academic startup company that provides products and services for intelligent  vehicles. Currently more than a hundred companies all over the world are adopting Tier IV’s solutions built on top of Autoware to develop autonomous vehicles and/or their components. Through the provision of flexible solutions built on top of Autoware, Tier IV contributes significantly to open innovations of autonomous driving technologies. [http://www.tier4.jp/en/](https://linaro.co/tierIV) + +### About Linaro + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. + +To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96boards.org/). diff --git a/src/content/blogs/tricks-for-debugging-qemu-rr.mdx b/src/content/blogs/tricks-for-debugging-qemu-rr.mdx new file mode 100644 index 0000000..9385ff7 --- /dev/null +++ b/src/content/blogs/tricks-for-debugging-qemu-rr.mdx @@ -0,0 +1,48 @@ +--- +author: peter-maydell +comments: false +date: 2015-06-22T22:02:00.000Z +description: "Over the years I’ve picked up a few tricks for tracking down + problems in QEMU, and it seemed worth writing them up." +excerpt: "Peter Maydell talks about some helpful tricks for debugging QEMU - rr + that he has learned working with it. 
" +link: /blog/core-dump/tricks-for-debugging-qemu-rr/ +tags: + - arm + - qemu +title: Tricks for debugging QEMU — rr +related: [] + +--- + +Over the years I’ve picked up a few tricks for tracking down problems in QEMU, and it seemed worth writing them up. First on the list is a tool I’ve found relatively recently: [rr, from the folks at Mozilla.](http://rr-project.org/) + +rr is a record-and-replay tool for C and C++: you run your program under the recorder and provoke the bug you’re interested in. Then you can debug using a replay of the recording. The replay is deterministic and side-effect-free, so you can debug it as many times as you want, knowing that even an intermittent bug will always reveal itself in the same way. Better still, rr recently gained support for reverse-debugging, so you can set a breakpoint or watchpoint and then run time backwards to find the previous occurrence of what you’re looking for. This is fantastic for debugging problems which manifest only a long time after they occur, like memory corruption or stale entries in cache data structures. The idea of record-and-replay is not new; where rr is different is that it’s very low overhead and capable of handling complex programs like QEMU and Mozilla. It’s a usable production quality debug tool, not just a research project. It has a few rough edges, but the developers have been very responsive to bug reports. + +Here’s a worked example with a real-world bug I tracked down last week. (This is a compressed account of the last part of a couple of weeks of head-scratching; I have omitted various wrong turns and false starts…) + +I had an image for QEMU’s Zaurus (“spitz”) machine, which managed to boot the guest kernel but then got random segfaults trying to execute userspace. Use of git bisect showed that this regression happened with [commit 2f0d8631b7](http://git.qemu.org/?p=qemu.git;a=commitdiff;h=2f0d8631b7;hp=2e1198672759eda6e122ff38fcf6df06f27e0fe2). That change is valid, but it did vastly reduce the number of unnecessary guest TLB flushes we were doing. This suggested that the cause of the segfaults was a bug where we weren’t flushing the TLB properly somewhere, which was only exposed when we stopped flushing the TLB on practically every guest kernel-to-userspace transition. + +Insufficient TLB flushing is a little odd for an Arm guest, because in practice we end up flushing all of QEMU’s TLB every time the guest asks for a single page to be flushed. (This is forced on us by having to support the legacy Armv5 1K page tables, so for most guests which use 4K pages all pages are “huge pages” and take a less efficient path through QEMU’s TLB handling.) So I had a hunch that maybe we weren’t actually doing the flush correctly. OK, change the code to handle the “TLB invalidate by virtual address” guest operations so that they explicitly flush the whole TLB — bug goes away. Take that back out, and put an assert(0) in the cputlb.c function that handles “delete a single entry from the TLB cache”.  This should never fire for an Arm guest with 4K pages, and yet it did. + +At this point I was pretty sure I was near to tracking down the cause of the bug; but the problem wasn’t likely to be near the assertion, but somewhere further back in execution when the entry got added to the TLB in the first place. Time for rr. + +Recording is simple: just rr record qemu-system-arm args.... Then rr replay will start replaying the last record, and by default will drop you into a gdb at the start of the recording. 
Let’s just let it run forward until the assertion:
+
+![code section 1.7](/linaro-website/images/blog/code-section-1.7)
+
+Looking back up the stack we find that we were definitely trying to flush a valid TLB entry:
+
+![code section 2.7](/linaro-website/images/blog/code-section-2.7)
+
+and checking env->tlb\_flush\_mask and env->tlb\_flush\_addr shows that QEMU thinks this address is outside the range covered by huge pages. Maybe we miscalculated them when we were adding the page? Let’s go back and find out what happened then:
+
+![code section 3.7](/linaro-website/images/blog/code-section-3.7)
+
+(Notice that we hit the assertion again as we went backwards over it, so we just repeat the reverse-continue.) We stop exactly where we want to be to investigate the insertion of the TLB entry. In a normal debug session we could have tried restarting execution from the beginning with a conditional breakpoint, but there would be no guarantee that guest execution was deterministic enough for the guest address to be the same, or that the call we wanted to stop at was the only time we added a TLB entry for this address. Stepping forwards through the tlb code, I notice we don’t think this is a huge page at all, and in fact you can see from the function parameters that the size is 1024, not the expected 4096. Where did this come from? Setting a breakpoint in arm\_cpu\_handle\_mmu\_fault and doing yet another reverse-continue brings us to the start of the code that’s doing the page table walk so we can step forwards through it. (You can use rn and rs to step backwards if you like but personally I find that a little confusing.) Now that rr has led us to the scene of the crime, it’s very obvious that the problem is in our handling of an XScale-specific page table descriptor, which we’re incorrectly claiming to indicate a 1K page rather than 4K. [Fix that](http://lists.gnu.org/archive/html/qemu-devel/2015-05/msg05956.html), and the bug is vanquished.
+
+Without rr this would have been much more tedious to track down. Being able to follow the chain of causation backwards from the failing assertion to the exact point where things diverged from your expectations is priceless. And anybody who’s done much debugging will have had the experience of accidentally stepping or continuing one time too often and zooming irrevocably past the point they wanted to look at — with reverse execution those errors are easily undoable.
+
+I can’t recommend rr highly enough — I think it deserves to become a standard part of the Linux C/C++ developer’s toolkit, as valgrind has done before it.
+
+*Originally posted at [https://translatedcode.wordpress.com/2015/05/30/tricks-for-debugging-qemu-rr/](https://translatedcode.wordpress.com/2015/05/30/tricks-for-debugging-qemu-rr/)*
diff --git a/src/content/blogs/u-boot-on-arm32-aarch64-and-beyond.mdx b/src/content/blogs/u-boot-on-arm32-aarch64-and-beyond.mdx
new file mode 100644
index 0000000..3b2d01b
--- /dev/null
+++ b/src/content/blogs/u-boot-on-arm32-aarch64-and-beyond.mdx
@@ -0,0 +1,86 @@
+---
+author: linus-walleij
+comments: false
+date: 2015-08-20T15:14:21.000Z
+description: >
+  In this article, Linus Walleij looks at U-Boot on Arm32, Aarch64 and beyond.
+  Read about his findings here!
+excerpt: U-Boot became the de facto bootloader on most Arm systems during the
+  early 2000s. What is the best bootloader to use for any one system is a
+  subject of debate.
There have been pushes to different “there can be only one”
+  approaches, but the recent consensus is to “use the right tool for the
+  job” Learn more
+link: /blog/core-dump/u-boot-on-arm32-aarch64-and-beyond/
+tags:
+  - arm
+title: U-Boot on Arm32, AArch64 & Beyond
+related: []
+
+---
+
+U-Boot became the de facto bootloader on most Arm systems during the early 2000s. It grew out of an earlier flora of smaller and custom boot loaders such as RedBoot and Open Handhelds Arm Bootloader. Currently the main alternatives are [the Little Kernel bootloader](https://developer.qualcomm.com/qfile/28821/lm80-p0436-1_little_kernel_boot_loader_overview.pdf), which has been used by Qualcomm and Google for a series of Android devices, and the [UEFI-compliant Tianocore](http://www.tianocore.org/) (also known as EDK II) bootloader.
+
+What is the best bootloader to use for any one system is a subject of debate. There have been pushes to different “there can be only one” approaches, but the recent consensus is to “use the right tool for the job”, while people may have differing opinions on what the right tool is.
+
+**Boot Chain**
+All SoCs have some way to bootstrap their CPU(s) to execute code on cold start. On an older Arm32 system, execution is usually started in an on-chip ROM, which in turn continues execution either in NOR flash (memory-mapped flash memory) or by initializing the main RAM (which is normally not accessible at boot) and loading a proper boot loader from NAND flash, eMMC or an SD card.
+
+Sometimes several steps need to be performed to boot a system, and as some code may need to execute from on-chip memory or locked-down cache until the RAM is initialized, the initial steps can be very small boot stages (programs).
+
+Eventually a fairly large single-threaded program is loaded into memory, and its task is to load and execute the final operating system from images (binary objects, files) stored on some media. For simplicity, this program is usually executed from 1-to-1-mapped physical memory. This program may also have the ability to reformat and install new images on the system.
+
+This program is referred to as the boot loader. The stages up until this program is loaded are handled by [Arm Trusted Firmware](http://www.slideshare.net/linaroorg/arm-trusted-firmareforarmv8alcu13) on the Arm reference designs for AArch64.
+
+The boot loader will typically be a bit interactive (it has a prompt) and support booting the final operating system from hard disk, memory card, flash memory, ethernet connection, USB cable, or even through light morse code from an IrDA sensor. It places the final operating system image in memory, passes some information to it and kicks off execution at the start of executable memory.
+
+From this point, the operating system needs to set up virtual memory, caches and everything else needed to get the system into full-flight mode.
+
+**Chain of Trust**
+If a chain of trust is to be preserved across these stages, the first point of execution needs to be trusted and contain routines for checking the validity of the next executable program all the way. This is usually achieved using public key cryptography, where a public key is stored in the ROM (or a similar location inside the device) and binaries to be executed need to be signed by the secret key corresponding to that public key. This way the device will not contain any secret keys. Sometimes a certificate chain is used to distribute the signing authorization.
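+
+U-Boot’s Flattened Image Tree (FIT) signing is one concrete implementation of this idea: the images are signed with a private key at build time, and the corresponding public key is compiled into the boot loader’s control device tree so that anything which fails verification can be rejected. A rough sketch of that flow is shown below; the key names, file names and image tree source are illustrative assumptions rather than a complete recipe:
+
+```bash
+# generate a signing key pair (the private key stays on the build machine)
+$ openssl genrsa -out keys/dev.key 2048
+$ openssl req -batch -new -x509 -key keys/dev.key -out keys/dev.crt
+
+# build a signed FIT image and embed the public key into U-Boot's device tree,
+# marking it as required so that unsigned images are refused
+$ mkimage -f image.its -k keys -K u-boot.dtb -r image.fit
+```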
+
+**Initial U-Boot AArch64 Support**
+
+The AArch64 (Arm64) support for U-Boot was pioneered by Scott Wood, David Feng and York Sun from Freescale in 2013 to support their LS2085 platforms. Leo Yan from Marvell joined the efforts, and thanks to these people U-Boot can start and boot Linux on a range of Armv8/AArch64 systems.
+
+**Arm Fastmodel Support**
+Freescale’s submissions included fastmodel support, a specific customization known as the Foundation model or simply FVP. This is a cycle-exact AArch64 emulator made by Arm Ltd, which behaves akin to a Versatile Express reference board, just with the difference that the whole system is emulated in software.
+
+In order to load binary images into the emulated memory, so-called semihosting is used. This is basically a way for the code running on the emulator to talk directly to the emulator, i.e. for it to be aware that it is not running on real hardware. By issuing a parametrized HLT instruction, the code running in the model can ask the emulator for services, such as retrieving files into memory.
+
+When I started working on AArch64 support for U-Boot I augmented this code a bit, so that we now have [a command called *smhload*](http://git.denx.de/?p=u-boot.git;a=commitdiff;h=202a674bb8b7d7867503224857a2b0e04617d9b3) that will load a file into the emulated memory akin to how files are loaded from flash memory or over Ethernet+TFTP.
+
+By working on the Foundation model, I could verify that execution and the interactive prompt were working, and I could continue with support for real reference hardware.
+
+**Arm Juno Development System Support**
+Freescale’s attempt had been focused around emulated reference hardware and later their own hardware. When I started working on AArch64 the scope was on [the 64 Bit Juno Arm Development Platform](http://www.arm.com/files/pdf/Juno_ARM_Development_Platform_datasheet.pdf). The idea was to showcase U-Boot on this real hardware as a reference point for the rest of the Arm vendor ecosystem. If we could get U-Boot working nicely on Juno, we could provide a trusted starting point for others.
+
+First we had to make Juno start the compiled U-Boot. Arm recommends that U-Boot is started from the Arm Trusted Firmware, which is essentially the ROM for the Juno. The trusted firmware performs the boot chain as described above in several stages or *Boot Levels* called BL1, BL2, BL3-1, BL3-2 and BL3-3. I only needed to concern myself with the last boot level, BL3-3, which is the level containing a “real” bootloader binary. In the examples, BL3-3 was Tianocore UEFI. By compiling U-Boot to address 0xe0000000 and replacing UEFI with the resulting binary, U-Boot was executed by the Arm Trusted Firmware.
+
+At first the system would not boot at all - the Juno went catatonic. By instrumenting U-Boot with a [low-level UART print hack](http://dflund.se/~triad/krad/junoboard/0001-vexpress64-assembly-debugging-and-uglyfix.patch) to push strings to the console before initializing the rest of U-Boot, I could determine the cause: the MPIDR (Multi-Processor ID register) had a totally different meaning and contents on a multi-cluster machine. The U-Boot code was adapted for a single cluster of symmetric CPUs, not for multiple clusters of CPUs, such as the cluster of two Cortex-A57s and four Cortex-A53s found on the Juno.
+
+Freescale’s system had the ROM or similar mechanism enter U-Boot on all CPUs, and when it reached U-Boot all slave CPUs were immediately dispatched to a spin table while execution of the single-threaded U-Boot continued on the primary CPU. However the branch\_if\_slave assembler macro would think all CPUs on the system were secondary CPUs.
+
+Since the Juno board was only initiating execution of the boot loader on the primary CPU, this problem was solved with [a patch making U-Boot assume single entrance](http://git.denx.de/?p=u-boot.git;a=commitdiff;h=23b5877c64562a314f8d8c60d0066cd346f2d886) (i.e. only one CPU will execute it) and after this we got all the way to the prompt. A special configuration symbol, ARMV8\_MULTIENTRY, was created for systems such as Freescale’s to select. This way single entrance was made the norm.
+
+Now U-Boot was working to the prompt on Juno hardware, so I could test loading a kernel by compiling in Y-modem binary loading support, uploading a kernel Image file and a device tree into memory over the serial port, and booting them. It worked fine. [A patch for initial Juno support](http://git.denx.de/?p=u-boot.git;a=commitdiff;h=ffc103732c82faa945c85bbb7c5c34c30b6fac72) was submitted upstream and merged.
+
+Uploading a big kernel and initramfs over the serial port at 115200 baud was quite tiresome, so I immediately started to get U-Boot to load kernels over the ethernet port, resulting in [a patch supporting SMSC9118 ethernet booting](http://git.denx.de/?p=u-boot.git;a=commitdiff;h=b31f9d7a4aea23a8a9d007356a2b61e503e69daa). This way it is possible to quickly boot a kernel using ethernet and TFTP.
+
+It was now quick and efficient to develop Linux using U-Boot, especially if you compile a boot script into the ethernet/TFTP boot so that all you really need to do is reset the machine and it will immediately download a new kernel from the TFTP server and run it.
+
+However it is nice to be able to flash a kernel and a filesystem into the on-board flash memory in the Juno and use that to just boot the machine, especially for demos and similar situations where you want to prepare the machine and just use it. Thus I also added flash support to the Juno, the tricky part being [a patch to handle the AFS partitions](http://git.denx.de/?p=u-boot.git;a=commitdiff;h=4bb6650632a3e36185f689c56ea31f189ce39325) in the flash - this was a new Arm-specific flash image format that relies on footers at the end of the last erase block of the flash. After adding this, I could make [a patch making this the default boot method](http://git.denx.de/?p=u-boot.git;a=commitdiff;h=10d1491b3dea43182aec5cdce8f81ca520400c4b) for the Juno, so the boot chain was self-contained on the device.
+
+**Future Directions**
+We have now pieced together a system that will start U-Boot from Arm Trusted Firmware and then have U-Boot load the Linux kernel and a device tree and start it. Are there problems remaining?
+
+* One of the big outstanding issues is that things are fragile because memory references need to be hard-coded in U-Boot or Arm Trusted Firmware. For example U-Boot currently [assumes that Arm TF will use 16MB](http://git.denx.de/?p=u-boot.git;a=commitdiff;h=303557089f3db253eaec6f38dece204fd154b6ac) of the DRAM memory. If Arm TF changes things around and uses more or less memory, U-Boot needs to be reconfigured and recompiled. U-Boot, on the other hand, will then pass whatever knowledge it has about the memory to the Linux kernel by augmenting the device tree.
So if Arm TF could communicate the memory available to U-Boot and the OS, this would be great.
+
+* U-Boot relies on prior boot stages such as Arm Trusted Firmware to install [PSCI handlers](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0022c/index.html), while on Armv7 this was usually done by augmenting U-Boot to do the same. Letting U-Boot install PSCI handlers is a bit bogus, since it is a piece of resident code left in memory after U-Boot has executed and not really “boot loader” code. U-Boot was augmented to compile these into a special memory area, copy them there and leave them around for the operating system to use later. Still there are people who might like to do this on Armv8 U-Boot, especially those not using Arm Trusted Firmware.
+
+* People apparently toy with the idea of booting U-Boot on bare metal, using a very small ROM (or none at all) and no Arm Trusted Firmware, letting U-Boot just execute immediately on the system. As U-Boot relies on something else to set up main memory and provide PSCI, this currently does not work. Doing this would require U-Boot to initialize memory and install PSCI handlers. It would also need to be small enough to execute from on-chip RAM.
+
+* Chain of trust booting with signed boot levels, a signed U-Boot, a signed kernel image and a signed device tree, providing an example of a totally locked-down system. The Flattened Image Tree (FIT) supported by U-Boot is likely the best way forward here, but it requires U-Boot to access public key infrastructure to verify images unless you want to compile the public key directly into U-Boot, which is often not a good idea.
+
+* Fastboot - the Android boot protocol used by the Little Kernel - exists in U-Boot but has not been tested or verified. It can run over either USB or Ethernet.
+
+* More hardware support - such as booting from the USB stick or MMC/SD card found in the Juno board. This was not covered by the experimental port.
diff --git a/src/content/blogs/update-android-kernel-tools.mdx b/src/content/blogs/update-android-kernel-tools.mdx
new file mode 100644
index 0000000..6d50e56
--- /dev/null
+++ b/src/content/blogs/update-android-kernel-tools.mdx
@@ -0,0 +1,69 @@
+---
+keywords: Linaro, Linux on Arm, Open Source, Linux, Arm, Android, Kernel, Updates, Tools
+title: Update Android Kernel Related Files Without Getting the sdcard Out
+description: In this article, Yongqin Liu shows us an easy way to update Android
+  kernel-related files without getting the sdcard out. Read more here!
+image: linaro-website/images/blog/Client_Devices_banner_pic
+author: yongqin-liu
+date: 2013-02-05T02:53:36.000Z
+tags:
+  - android
+  - linux-kernel
+  - toolchain
+link: /blog/android-blog/update-android-kernel-tools/
+related: []
+
+---
+
+Do you feel it's difficult or complicated to update kernel-related files for a running Android device?
+
+Do you find it tedious to get the sdcard out when you just want to update some kernel files and the Android device is somewhere else?
+
+The Linaro Android team now has tools to help you update kernel-related files for an Android device with a few commands - in most cases just one.
+
+Do you want to update the kernel-related files with a new boot.tar.bz2? You can do so with the following command:
+
+```
+./update-android.sh out/target/product/pandaboard/boot.tar.bz2
+```
+
+Do you want to update only the board.dtb file?
No problem, you can do:
+
+```
+./update-android.sh out/target/product/pandaboard/boot/board.dtb
+```
+
+Do you just want to update the loglevel in /init.rc to 8? Use the following command:
+
+```
+./update-uInitrd.sh /tmp/init.rc
+```
+
+What do you think about the tools? Do you want to give these commands a try? If so, you can get these scripts by using the following command:
+
+```
+git clone http://android.git.linaro.org/git-ro/platform/external/linaro-android-tools.git
+```
+
+More information on these tools can be found at:
+[https://wiki-archive.linaro.org/Platform/Android/KernelUpdateTools](https://wiki-archive.linaro.org/Platform/Android/KernelUpdateTools)
+
+***About the Linaro Android Team***
+
+*The primary goals of the Linaro Android Team are to develop and release tested [monthly builds](http://releases.linaro.org/) of Android for Galaxy Nexus, Panda, Snowball, Origen, and Versatile Express, collaborate with upstream [development efforts](https://wiki-archive.linaro.org/Platform/Android/UpstreamWork) and perform monthly [toolchain benchmarking](https://wiki-archive.linaro.org/Platform/Android/AndroidToolchainBenchmarking).*
+
+* *Mailing List: linaro-dev@lists.linaro.org ([subscribe](https://lists.linaro.org/mailman3/lists/linaro-dev.lists.linaro.org/))*
+
+* *[Team Members](/about/)*
+
+*More information about the Linaro Android Team can be found at: [https://wiki-archive.linaro.org/Platform/Android](https://wiki-archive.linaro.org/Platform/Android)*
+
+***About the Linaro Kernel Team***
+
+*The Kernel Consolidation Working Group targets the Linux kernel. Its goals are to consolidate source repositories, unify support across SoCs, develop new kernel infrastructure and features and more. Our acid test: shipping a single source tree that integrates support for multiple modern Arm SoCs.*
+
+* *Mailing List: linaro-dev@lists.linaro.org ([subscribe](https://lists.linaro.org/mailman3/lists/linaro-dev.lists.linaro.org/))*
+
+* *[Team Members](/about/)*
+
+*More information about the Linaro Kernel Team can be found at: [https://wiki-archive.linaro.org/WorkingGroups/Kernel](https://wiki-archive.linaro.org/WorkingGroups/Kernel)*
diff --git a/src/content/blogs/video-plays-key-role-expanding-linaros-community-building-future-linux-arm.mdx b/src/content/blogs/video-plays-key-role-expanding-linaros-community-building-future-linux-arm.mdx
new file mode 100644
index 0000000..4e5d117
--- /dev/null
+++ b/src/content/blogs/video-plays-key-role-expanding-linaros-community-building-future-linux-arm.mdx
@@ -0,0 +1,32 @@
+---
+title: Video plays a key role in expanding Linaro’s community and building the
+  future of Linux on Arm
+image: linaro-website/images/blog/linaro-logo
+tags: []
+author: linaro
+date: 2012-06-29T11:18:55.000Z
+link: /news/video-plays-key-role-expanding-linaros-community-building-future-linux-arm/
+categories:
+  - news
+description: HONG KONG - 29 JUN 2012
+related: []
+
+---
+
+HONG KONG - 29 JUN 2012
+
+During the week of 28 May to 1 June 2012, over 230 developers and engineers from around 50 organizations and the Arm open-source community gathered at the Gold Coast Hotel for Linaro Connect Q2.12 to plan out and code the future of Linux on Arm.
+
+The entire event was broadcast on YouTube by the [Novacut](https://launchpad.net/novacut) video crew and video blogger Nicolas [Charbonnier (Charbax)](http://armdevices.net/category/companies/linaro/) and, to make Linaro Connect more accessible in real time, all the key sessions were done using Google Hangouts.
During this event a concentrated effort was made to make it more accessible for those attending remotely through Linaro’s use of Google+ Hangouts on Air, allowing for better audio and visual remote participation. This was the first time Google+ Hangouts on Air was used to record all the sessions, and there were many challenges that had to be resolved during the week, such as finding a way to let participants know ahead of time where the sessions could be found, effective room set-up, camera locations, and how to direct the hangouts to the same YouTube channel. There were many lessons learned during this event and much feedback was received from those who participated remotely to help improve these efforts for our next event.
+
+Charbax interviewed attendees, captured sessions, demos and more from the event. Novacut is no stranger to Linaro Connect events and they have been assisting Linaro in capturing these events through video, pictures and interviews for the past year; however, during this event there was an effort to video every session and activity as opposed to just the key ones. Charbax joined us for the first time in Hong Kong and has already made a great impression on those who have seen his videos from the event. Most notable was the demonstration of the toolchain improvements to Linaro’s Android Evaluation Build (LEB), shown on the PandaBoard but applicable to all our member boards. This video ([http://armdevices.net/2012/06/02/linaro-improvements-to-android-4-0-4-performance-on-the-pandaboard-ti-omap4430-platform/](http://armdevices.net/2012/06/02/linaro-improvements-to-android-4-0-4-performance-on-the-pandaboard-ti-omap4430-platform/)) has already generated more than 200,000 views.
+
+Overall, between Novacut ([http://www.youtube.com/user/LinaroOrg](http://www.youtube.com/user/LinaroOrg)), Charbax ([http://armdevices.net/category/companies/linaro/](http://armdevices.net/category/companies/linaro/)) and the Google+ Hangouts on Air recordings, more videos will be added to the Linaro Connect Q2.12 Resource page by the end of July.
+
+Another notable discussion that was captured on video by Novacut was “Is it time for Arm in the Enterprise?” ([http://youtu.be/UPw11z49KXs](http://youtu.be/UPw11z49KXs)). This panel discussion was facilitated by Linaro CTO David Rusling and included Tim Wesselman of HP ([http://www8.hp.com/](https://www8.hp.com/uk/en/hp-information.html)), Mark Shuttleworth of Canonical ([http://www.canonical.com/](http://www.canonical.com/)), Jeff Underhill of Arm ([http://www.arm.com/](http://www.arm.com/)) and Jon Masters of Red Hat ([http://www.redhat.com/](http://www.redhat.com/)), who “took the bullets as they came” and answered questions around Arm in the enterprise ecosystem.
+
+Join us at Linaro Connect
+
+Linaro Connect is held every three to four months to bring the Linux on Arm community together to work on the latest system-on-chip (SoC) developments, plan new engineering efforts and hold engineering hacking sessions. These events give the Linux community an opportunity to be a part of the Linaro team and help to define the Arm tools, Linux kernels and builds of key Linux distributions including Android and Ubuntu on member SoCs.
+
+For more information on the company, access to software and tools, and information on the community and open engineering, visit www.linaro.org
diff --git a/src/content/blogs/view-linaro-10-11-release-webinar.mdx b/src/content/blogs/view-linaro-10-11-release-webinar.mdx
new file mode 100644
index 0000000..eaa3e98
--- /dev/null
+++ b/src/content/blogs/view-linaro-10-11-release-webinar.mdx
@@ -0,0 +1,33 @@
+---
+author: linaro
+date: 2010-12-12T12:07:46.000Z
+description: CAMBRIDGE, UK - 12 DEC 2010
+link: /news/view-linaro-10-11-release-webinar/
+title: View the Linaro 10.11 release webinar
+tags: []
+related: []
+
+---
+
+CAMBRIDGE, UK - 12 DEC 2010
+
+## View the Linaro 10.11 release and beyond webinar that took place at Techcon
+
+### Overview:
+
+This talk outlined the progress made by Linaro, a new not-for-profit organisation that drives forward innovation and aligned investment in open source software and tools. It described Linaro's current engineering activity, how it works with the open source community, and the 10.11 release, and looked forward to the next six-month engineering cycle. The reasons for doing a mixture of upstream development in key areas such as the Linux kernel, tools and graphics, as well as a regular validated release, are discussed. The relevance to distribution owners, OEMs, silicon partners and open source community developers is covered.
+
+### This presentation is relevant to:
+
+* Operators considering their open source strategies
+* OEMs/ODMs implementing Linux-based products
+* Silicon partners looking to have tier-1 Linux support
+* Open source developers wanting to get involved
+
+### Webinar registrants will learn:
+
+1. How Linaro is relevant to Linux-based open source projects and product development
+2. Progress at Linaro since the launch at Computex
+3. What you can take advantage of in the 10.11 release and plans for 11.05
+4. How silicon partners can use Linaro to improve their Linux support
+5. How to join the Linaro community and get involved
diff --git a/src/content/blogs/watch-announcement-linaro-computex.mdx b/src/content/blogs/watch-announcement-linaro-computex.mdx
new file mode 100644
index 0000000..fb11e8b
--- /dev/null
+++ b/src/content/blogs/watch-announcement-linaro-computex.mdx
@@ -0,0 +1,15 @@
+---
+author: linaro
+date: 2010-06-02T10:46:06.000Z
+description: Linaro is a collaborative engineering organization consolidating
+  and optimizing open source software and tools for the Arm architecture.
+link: /news/watch-announcement-linaro-computex/
+title: Watch the announcement of Linaro at Computex
+tags: []
+related: []
+
+---
+
+CAMBRIDGE, UK - 2 JUN 2010
+
+For more information on the company, access to software and tools, and information on the community and open engineering, visit [www.linaro.org](/)
diff --git a/src/content/blogs/whats-new-qemu-2-9.mdx b/src/content/blogs/whats-new-qemu-2-9.mdx
new file mode 100644
index 0000000..7377a27
--- /dev/null
+++ b/src/content/blogs/whats-new-qemu-2-9.mdx
@@ -0,0 +1,63 @@
+---
+title: What's new in QEMU 2.9
+description: In this article, Alex Bennée provides an overview of what's new in
+  the latest 2.9 version of QEMU. Read more here!
+image: linaro-website/images/blog/Banner_Virtualization
+tags:
+  - qemu
+  - virtualization
+author: alex-bennee
+date: 2017-04-19T16:46:38.000Z
+link: /blog/core-dump/whats-new-qemu-2-9/
+related: []
+
+---
+
+![lightbox\_disabled=True Core Dump Banner](/linaro-website/images/blog/core-dump)
+
+QEMU is an interesting multi-faceted open source project.
It is a standard component of the Linux virtualisation stack, used by both the KVM and Xen hypervisors for device emulation. Thanks to its dynamic just-in-time recompilation engine known as the Tiny Code Generator (TCG), it is also capable of emulating other architectures on a number of hosts. This takes the form of either full system emulation or the lighter-weight user-mode emulation that allows foreign user-space binaries to be run alongside the rest of the host system.
+
+Started in 2003 by [Fabrice Bellard](https://en.wikipedia.org/wiki/Fabrice_Bellard), QEMU is now maintained by a community of mostly corporate-sponsored engineers, although unaffiliated individuals are still the second largest set of contributors. The project's codebase has continued to grow over the years and it has now reached the point of making around three stable releases a year, typically one in April, August and December.
+
+Linaro engineers take an active part in the development and maintenance of the project and we thought it would be useful to provide an update on Arm-related features in the upcoming [2.9 release](https://wiki.qemu-project.org/index.php/ChangeLog/2.9).
+
+## 1 AArch64 EL2 Support for TCG
+
+Building on previous work to enable EL3 (the secure CPU mode provided by the security extensions as part of TrustZone), we now fully support the hypervisor CPU exception level EL2. As most Arm hypervisors require support for virtualization in the interrupt controller as well, and since we only support the virtualization extensions in our emulated GICv3 (not GICv2), users who want to run hypervisors in the emulated AArch64 machine using EL2 must so far select a GICv3 interrupt controller for the emulated machine.
+
+```bash
+ qemu-system-aarch64 ${QEMU_OPTS} \
+   -machine gic-version=3 \
+   -machine virtualization=true
+```
+
+This is especially useful if you want to debug a 64-bit Arm hypervisor, since developers often don't have access to AArch64 hardware while traveling or attending conferences.
+
+While it is still slow compared to running KVM on real hardware, it is convenient for testing EL2 code on a developer's desktop with the power of the GDB stub. QEMU is often used extensively for automated testing and CI, and supporting hypervisors inside emulated environments is crucial for supporting CI on Arm using commodity x86 hardware. With the introduction of MTTCG (see next section), this even scales for multi-core and can be used to discover SMP-related race conditions.
+
+## 2 Multi-threaded TCG for System Emulation
+
+Previously, system emulation in QEMU was single-threaded - with a single host thread emulating all the guest’s vCPUs. As many-core SMP systems become more and more commonplace, this has slowly become more of a bottleneck in QEMU's performance. The multi-threaded TCG project (also known as MTTCG) is the culmination of several years of shared effort between commercial, community and academic contributors. Linaro is proud to be heavily involved in coding, reviewing, and helping get this feature accepted upstream.
+
+While the work has focused on system emulation, a number of the updates have also had benefits for the rest of TCG emulation, including the efficient QHT translation-cache lookup algorithm and a complete overhaul of how TCG deals with emulating atomic operations.
If you are interested in a more detailed write-up of the technical choices made, we wrote an [article for LWN last year](https://lwn.net/Articles/697265/).
+
+This work finally removes the single-threaded bottlenecks from system emulation, but it is not a performance panacea. As long as you have unused CPU cores on your host machine you should see a performance improvement for each new vCPU you add to your guest, up until around 8 cores. At that point the cost of keeping the system behaviour coherent will eventually catch up with you.
+
+The core technology on which MTTCG relies is target agnostic and designed so all the various architectures QEMU emulates can take advantage of it. However each front end needs to make changes to its emulation to ensure it takes advantage of the new TCG facilities for modelling atomic and barrier operations.
+
+Currently MTTCG is enabled by default for both 32-bit and 64-bit Arm chips as well as the Alpha architecture when running on an x86\_64 host. This is by far the most common use case for Arm emulation.
+
+## 3 Cortex M fixes
+
+In the last few years Linaro has been mostly concentrating on the A-profile (Application profile) Arm processors. These are the ones designed to run full-stack operating systems like Linux. With the growing interest in the Internet of Things (IoT), we are starting to turn our attention to the M-profile (Microcontroller). The Microcontroller profile processors are targeted at much more constrained, low-latency and low-power deeply embedded applications. Their memory is usually measured in kilobytes (kB) rather than megabytes (MB), so they tend to run custom run-loops or highly constrained real-time operating systems (RTOS) like [Zephyr](https://www.zephyrproject.org/).
+
+While QEMU nominally supports the Cortex-M3 processor, support for boards using it has been sporadic and the result is a situation where there have been long-standing unfixed bugs and important features missing. As the architecture has progressed, support for the newer M-profile CPUs has also lagged.
+The 2.9 release sees a number of fixes to the Cortex-M series emulation as we ramp up our efforts to improve QEMU's microcontroller support. The fixes have so far been aimed at architectural aspects which were known to be broken, such as the NVIC emulation. However, part of the discussion at our recent [BUD17 session](https://resources.linaro.org/en/resource/QFtiNVcvyfvprc75bNtqkX) was looking at what features we should prioritise for future QEMU releases, and we are currently focusing on getting MPU support upstream and supporting v8-M.
+
+This summary is not intended to be exhaustive and has concentrated on Arm-specific features. For example we have not covered updates to the common sub-systems shared by all architectures. For those interested in all the details, the [full changelog](http://wiki.qemu.org/ChangeLog/2.9) is worth a read.
+
+
+
+***
+

diff --git a/src/content/blogs/when-will-uefi-and-acpi-be-ready-on-arm.mdx b/src/content/blogs/when-will-uefi-and-acpi-be-ready-on-arm.mdx
new file mode 100644
index 0000000..6537e69
--- /dev/null
+++ b/src/content/blogs/when-will-uefi-and-acpi-be-ready-on-arm.mdx
@@ -0,0 +1,140 @@
+---
+keywords: Linaro, software on Arm, Linux, Linux on Arm, UEFI, ACPI, enterprise
+  software, Arm, U-Boot, FDT, kernel
+title: When Will UEFI and ACPI Be Ready On Arm?
+description: In this article, Grant Likely takes a detailed look at when
+  UEFI & ACPI will be ready on Arm. Click here for the latest updates on this topic!
+image: linaro-website/images/blog/RGB-Linaro_Standard
+author: grant-likely
+date: 2014-02-03T18:58:27.000Z
+tags:
+  - linux-kernel
+link: /blog/when-will-uefi-and-acpi-be-ready-on-arm/
+related: []
+
+---
+
+As part of the work to prepare for Arm servers, the Linaro Enterprise Group has spent the last year getting ACPI and UEFI working on Arm. We’ve been working closely with Arm and Arm’s partners on this to make sure the firmware architecture meets the needs of the server market.
+
+Yet this work has raised questions about what it means for the rest of the Arm Linux world. Why are we doing UEFI & ACPI? Who should be using UEFI/ACPI? Will U-Boot and FDT continue to be supported? Can hardware provide both ACPI & FDT? Can ACPI and FDT coexist? And so on. I want to quickly address those questions in this blog post, and then I want to discuss a development plan to get UEFI and ACPI onto shipping servers.
+
+### Table Of Contents
+
+* Why UEFI and ACPI?
+* Current Status
+* What Should Vendors Do?
+* For Hardware Shipping Very Shortly
+* For a Year From Now
+* The Long View
+* Implementation Details
+* UEFI
+* GRUB on UEFI
+* Linux on UEFI (CONFIG\_EFI\_STUB)
+* ACPI
+
+## **Why UEFI and ACPI?**
+
+Note: I am only talking about general purpose Armv8 servers here. Not mobile, not embedded. At this present time, I don’t see any compelling reason to adopt ACPI outside of the server market. If you are not doing server work you can stop reading right now and keep using what you already have.
+
+The short answer is, “UEFI and ACPI should be used because Arm’s server specifications will require it”, but that just leads to the question, “Why do the specifications require it?” Arm has spent the last couple of years consulting with its partners to develop a common platform for Arm servers. Those partners include OS, hardware, and silicon vendors as well as other interested parties.
+
+Firmware design was a big part of those consultations. The two big questions were: what firmware interface should be specified, and what hardware description should be used? First of all, it is important to note that while many of the same people are involved, UEFI and ACPI are not the same thing. UEFI is not tied to ACPI and will happily work with an FDT. Similarly, ACPI does not depend on UEFI, and can be made to work just fine with U-Boot.
+
+On the firmware interface, choosing UEFI was a pretty easy decision. UEFI has a specification, an open source BSD-licensed implementation, and the mainline project has Arm support. UEFI specifies how an OS loader is obtained from disk or the network and executed, and we have tools to work with it on Linux. Plus it works exactly the same way on x86. This makes life far simpler for vendors who already have tooling based on UEFI, and for end users who don’t have to learn something new. Supporting UEFI has minimal impact and doesn’t impose a major burden on Linux developers.
When compared with U-Boot it was no contest. U-Boot is great in the environments that it grew up in, but it doesn’t provide any of the consistency that is absolutely required for a general purpose platform.
+
+ACPI was a harder decision, particularly for us Linux folks. We’ve spent the past 3 years focusing on FDT development, and ACPI uses a different model. FDT is based on the model where the kernel drives all hardware right down to the clocks and regulators. The FDT merely describes how the components are configured and wired together. ACPI on the other hand moves a lot of the low-level wiring details into the ACPI bytecode so that the kernel doesn’t need to be aware of the details of power management. For Arm Linux this is an issue because it runs completely counter to all the work we’ve done on clock, regulator, gpio and power management frameworks; work that is absolutely essential when using board files or FDT, but may conflict when PM control is managed by ACPI. There is a lot of work that we need to do in order to get ACPI working on Arm Linux, especially since adding ACPI must not break existing board support.
+
+Hardware and silicon vendors look at ACPI in a very different way than kernel engineers. To begin with, they already have hardware and processes built around ACPI descriptions. Platform management tools are integrated with ACPI and they want to use the same technology across their x86 and Arm product offerings. They also go to great lengths to ensure that existing OS releases will boot on their hardware without patches to the kernel. Using ACPI gives them limited control over low-level details of the platform so that they can abstract away differences between systems.
+
+We kernel engineers don’t like to give up that control. There have certainly been enough instances where firmware has abused that control to the frustration of kernel hackers. Yet by and large the system works and there is a very healthy ecosystem around platforms using ACPI.
+
+Ultimately, Arm and the companies it consulted came to the consensus that ACPI is the best choice for Arm servers. I personally think it is the right decision. It helps that both the UEFI and ACPI specs are maintained under the umbrella of the UEFI Forum, which any company is welcome to join if they want to be involved in specification development. There are a lot of Linux people involved with the UEFI and ACPI working groups these days.
+
+I expect Arm will be publishing a firmware document requiring both UEFI and ACPI in the near future.
+
+## **Current Status**
+
+At this present moment, mainline only supports FDT. I think I’m safe in saying that among the Arm kernel maintainers we’re committed to FDT. It is not going away. Any hardware that provides an FDT that boots mainline Linux will continue to be supported. You can build a device with FDT and it will be supported for the long term. Similarly, there are no plans to deprecate U-Boot support, or any other boot loader for that matter. ACPI and UEFI support will happily coexist with FDT and support for other bootloaders.
+
+ACPI support is not yet in mainline. The patches for Arm are done and have been posted to the mailing list for review. I expect that they will get merged in v3.15 or v3.16 of the kernel. Now, work has shifted to working out best practices for using ACPI on Arm. At the moment we don’t yet know what a “good” set of Arm ACPI tables should look like. Nor do we know how existing kernel device drivers and infrastructure should work when ACPI is provided.
Until those questions are answered, ACPI isn’t ready to use. Getting those answers is going to take some time.
+
+So, for the vendors who do want to use ACPI, what are they supposed to do? Ship ACPI (which doesn’t work yet)? Ship FDT and upgrade to ACPI later? Ship both (but how does that work)? In an effort to clarify, here is how I see the world:
+
+## **What Should Vendors Do?**
+
+Given the current state of mainline support, what should vendors ship on their hardware? In typically helpful form, I answer, “it depends”. To keep the answer simple, I’ve split up my suggestions into three categories based on when hardware is going to ship: immediately, in the next year, and in the long term (2+ years).
+
+## **For Hardware Shipping Very Shortly**
+
+There are two questions to answer: which firmware should vendors use, and which hardware description? I’ll start with firmware. At this moment, Linux UEFI support is essentially complete. The patches have been reviewed positively and will probably get merged in the next merge window. UEFI will also work equally well with either an FDT or an ACPI hardware description. Plus the TianoCore UEFI project can already boot a Linux kernel without any additional patches. Anyone planning to ship servers in the near future should plan on using UEFI right from the start.
+
+UEFI is important because it provides a standard protocol and runtime for an OS to install itself. This is critical for distributions because it gets away from the hardware-specific install scripts that they have to do for U-Boot right now. UEFI has been working on Arm for years. Kernel patches for CONFIG\_EFI\_STUB and runtime services are under review for Arm32 (1, 2) and Arm64 (3) and should get merged soon. If you want a generic distribution image to boot on your hardware, then use UEFI.
+
+ACPI is another matter. While basic support patches are in the process of getting reviewed for merging, there is still a lot of work to be done on the infrastructure side to get ACPI working well. It is still going to take some time before we can claim that the kernel will support ACPI systems. ACPI should be considered experimental at this time, and changes should be expected before it is usable by the kernel. I suggest that any server vendor shipping hardware in the near future should make firmware provide an FDT.
+
+Stability also used to be an issue for FDT, but we’ve hit the point where the majority of FDT support is in mainline. It is no longer necessary to update the FDT in lock step with the kernel. We debated the problem at the 2013 Arm kernel summit in Edinburgh and made the decision that the FDT is a stable ABI once it hits mainline. If the ABI gets changed in a way that breaks users, then it is a bug and it must be fixed. Therefore, upgrading the kernel shall not require an FDT upgrade, even if it means we need to carry some legacy translation code for older bindings (4).
+
+That said, there are other valid reasons for upgrading the FDT, so vendors should allow for that when designing firmware. For instance, the kernel will not support hardware that isn’t described in the FDT. An FDT update would be required to enable previously hidden functionality. Additionally, bugs in FDT data should be fixed with an FDT update. We don’t want to be dealing with individual bug workarounds in the kernel that can be easily repaired in the data.
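+
+As a practical aside, an FDT update usually amounts to shipping a recompiled device tree blob. A minimal sketch with the standard device tree compiler is shown below; the file names are illustrative only.
+
+```bash
+# Illustrative file names only.
+# Compile a device tree source into the blob that firmware hands to the kernel.
+dtc -I dts -O dtb -o juno.dtb juno.dts
+
+# Decompile the blob currently shipped by firmware to inspect what it describes.
+dtc -I dtb -O dts -o current.dts juno.dtb
+```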
+
+A vendor can provide ACPI tables alongside the FDT, but in doing so I would strongly recommend providing it as an experimental feature and not the default boot behavior.
+
+On a related note, UEFI may also provide SMBIOS to the kernel regardless of whether ACPI or FDT is used. Vendors who want to provide SMBIOS data should feel free to do so. SMBIOS is an independent table which can provide identification information about the platform that is useful for asset management. SMBIOS is maintained by a [separate spec](http://dmtf.org/standards/smbios). [A simple SMBIOS patch](http://comments.gmane.org/gmane.linux.ports.arm.kernel/282504) has been posted enabling it on Arm.
+
+FDT, SMBIOS and ACPI tables are provided to the kernel via the UEFI Configuration Table. The configuration table is a list of key-value pairs. Keys are well-known GUIDs, and the value is a pointer to the data structure. SMBIOS and ACPI GUIDs are specified in the UEFI spec. The FDT GUID has been posted for review. FDT and SMBIOS data structures must be in memory allocated as EFI\_RUNTIME\_DATA.
+
+## **For a Year From Now**
+
+About a year from now, I would predict that ACPI support will be in mainline. My recommendations are the same as above, with the following exceptions:
+
+For the widest range of support, platforms should support both FDT and ACPI. Some operating systems will only support ACPI, others only FDT. ACPI will probably be stabilizing to the point that if support is in mainline, then we will continue to support the platform in Linux.
+
+My opinion is that Linux should use only FDT or only ACPI, but not both! \[Edit: by this I mean not both at the same time. It is perfectly fine for an OS to have support for both, as long as only one is used at a time] I think that when provided with both, the kernel should default to ACPI and ignore the FDT (this is up for debate; eventually I think this is what the kernel should do, and I think we should start with that policy simply because trying to change the policy at some arbitrary point in time will probably be a lot more painful than starting with the default that we want to ultimately get to).
+
+## **The Long View**
+
+Servers must provide ACPI, but vendors can optionally choose to provide an FDT if they need to support an OS which doesn’t have ACPI support. For example, this may be an issue for the Xen hypervisor which does not yet have a design for adding Arm ACPI support. The kernel should prefer ACPI if provided, but there are no plans to deprecate FDT support. As far as the kernel is concerned, FDT and ACPI are on equal footing. We will not refuse to boot a server that provides FDT.
+
+I cannot speak for OS vendors and hardware vendors on this topic. They may make their own statements on what is required to support the platform. So, while the kernel will fully support both FDT and ACPI descriptions, vendors may require ACPI.
+
+## **Implementation Details**
+
+Here I’m going to talk about how everything works together. There are a lot of moving parts in the firmware architecture described above, so it helps to have a description of how the parts interact.
+
+## **UEFI**
+
+The TianoCore UEFI project has a complete, open source UEFI implementation that includes support for both 32-bit and 64-bit Arm architectures. It can be used to build UEFI firmware which is compliant with the UEFI spec. UEFI cannot boot Linux directly, but requires a Linux-specific OS loader which is not part of the UEFI spec.
There is a legacy LinuxLoader in the UEFI tree, but as it is not standardized there is no guarantee that it will be included in firmware. Best practice is to use the native UEFI support in the kernel.
+
+UEFI passes all hardware description tables to an OS loader via the UEFI configuration table.
+
+## **GRUB on UEFI**
+
+GRUB UEFI support has been ported to Arm and works almost identically to GRUB UEFI on x86. The patches have been merged into mainline and will be part of the GRUB 2.02 release.
+
+Internally, the most significant difference between x86 and Arm GRUB support is that on x86 GRUB the boot\_params structure is used to pass additional data to the kernel, while on Arm it uses an FDT.
+
+## **Linux on UEFI (CONFIG\_EFI\_STUB)**
+
+The current set of ready-to-merge patches to the Linux kernel adds support for both CONFIG\_EFI\_STUB and UEFI runtime services. CONFIG\_EFI\_STUB embeds a UEFI OS loader into the kernel image itself, which allows UEFI to boot the kernel as a native UEFI binary. The stub takes care of setting up the system the way Linux wants it and jumping into the kernel. The kernel-proper entry point remains exactly the same as it is now and a CONFIG\_EFI\_STUB kernel is still bootable on U-Boot and other bootloaders.
+
+The kernel proper still requires an FDT pointer to be passed at boot time, so the UEFI stub is responsible for parsing the UEFI data, setting up the environment (including an FDT), and jumping into the kernel proper. When booting with FDT, the stub will obtain the FDT from UEFI and pass it directly to the kernel. When booting with ACPI, an empty FDT is created and used to pass boot parameters (kernel command line, initrd location, memory map, system table pointer, etc.) similar to how x86 uses the boot\_params structure.
+
+If both ACPI and FDT are provided by firmware, then all hardware description in the FDT will be ignored. The kernel should never attempt to use ACPI and FDT hardware descriptions at the same time (5).
+
+UEFI runtime services are also supported. The stub will pass the UEFI system table pointer through to the kernel and the kernel will reserve UEFI memory regions so that it can call back into UEFI code to query and manipulate boot variables, the hardware clock, and system wakeup.
+
+## **ACPI**
+
+As described above, the kernel will use ACPI if present in the configuration table, and fall back to FDT otherwise. The kernel will not attempt to use both ACPI and FDT hardware descriptions.
+
+One potential problem is that kexec may interact poorly with ACPI. The OS isn’t supposed to unpack the DSDT more than once, which would happen if the kernel kexecs into another kernel (each kernel will unpack it on boot). However, x86 has been doing kexec for years so this may not actually be a problem in the real world.
+
+*Re-published with permission from Grant Likely from his original blog post.*
+
+***
+
+1. Arm32 Runtime Service: [http://lwn.net/Articles/575363/](http://lwn.net/Articles/575363/)
+2. Arm32 CONFIG\_EFI\_STUB: [http://lwn.net/Articles/575352/](http://lwn.net/Articles/575352/)
+3. Arm64 CONFIG\_EFI\_STUB and Runtime services: [https://lkml.org/lkml/2013/11/29/373](https://lkml.org/lkml/2013/11/29/373)
+4. With the caveat that if nobody notices, is it really an ABI breakage? There are many embedded platforms which want to keep the FDT in lock step with the kernel and the build toolchain reflects that.
+5.
This is still up for debate, the priority of ACPI over FDT may yet be changed diff --git a/src/content/blogs/xilinx-joins-linaro-iot-embedded-group.mdx b/src/content/blogs/xilinx-joins-linaro-iot-embedded-group.mdx new file mode 100644 index 0000000..ffd9f39 --- /dev/null +++ b/src/content/blogs/xilinx-joins-linaro-iot-embedded-group.mdx @@ -0,0 +1,29 @@ +--- +author: linaro +date: 2017-09-25T14:58:01.000Z +description: Linaro Ltd, the open source collaborative engineering organization + developing software for the Arm® ecosystem, today announced that Xilinx has + joined the Linaro IoT and Embedded (LITE) Segment Group. +link: /news/xilinx-joins-linaro-iot-embedded-group/ +title: Xilinx joins Linaro IoT and Embedded Group +tags: [] +related: [] + +--- + +Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, today announced that Xilinx has joined the Linaro IoT and Embedded (LITE) Segment Group. + +LITE members work collaboratively in Linaro on reducing fragmentation in operating systems, middleware and cloud connectivity solutions, and delivering open source device reference platforms to enable faster time to market, improved security and lower maintenance costs for connected products. With a key goal to complete essential, non-differentiating, shared work as reliably as possible, Linaro relies on open source platforms - including Zephyr for Arm Cortex-M devices and EdgeX for Cortex-A gateways - to develop, continuously integrate and test code. + +“We’re excited to welcome Xilinx as a member of LITE,” said Matt Locke, LITE Director. “Discussions between Linaro and Xilinx have ranged from LITE gateway and security work through networking, 96Boards, and Xilinx All Programmable SoC and MPSoC platforms. I expect initial collaboration will focus on the gateway, but I look forward to building on this relationship to bring the benefits of collaborative, open-source engineering to other areas in Xilinx’s broad range of product offerings.” + +“Becoming a member of the LITE group will enable Xilinx to optimize the Linaro open source stacks with our All Programmable SoCs”, said Tomas Evensen, CTO Embedded Software at Xilinx. “We are looking forward to collaborating with the LITE community to enable end-to-end stacks in the IoT and embedded space.” + +**About Xilinx** + +Xilinx is a leading provider of All Programmable semiconductor products, including FPGAs, SoCs, MPSoCs, RFSoCs, and 3D ICs. Xilinx uniquely enables applications that are both software defined and hardware optimized – powering industry advancements in Cloud Computing, 5G Wireless, Embedded Vision, and Industrial IoT. For more information, visit [www.xilinx.com](http://www.xilinx.com). + +**About Linaro** + +Linaro is leading collaboration on open source development in the Arm ecosystem. The company has over 300 engineers working on consolidating and optimizing open source software for the Arm architecture, including developer tools, the Linux kernel, Arm power management, and other software infrastructure. Linaro is distribution neutral: it wants to provide the best software foundations to everyone by working upstream, and to reduce non-differentiating and costly low level fragmentation. The effectiveness of the Linaro approach has been demonstrated by Linaro’s growing membership, and by Linaro consistently being listed as one of the top five company contributors, worldwide, to Linux kernels since 3.10. 
+To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to all online. To find out more, please visit []() and [https://www.96Boards.org](https://www.96Boards.org). diff --git a/src/content/config.ts b/src/content/config.ts index 49071f0..0f341e3 100644 --- a/src/content/config.ts +++ b/src/content/config.ts @@ -64,7 +64,7 @@ const blogs = defineCollection({ title: z.string(), description: z.string(), date: z.date(), - image: z.string(), + image: z.string().optional().default("linaro-website/graphics/bg-logo-2"), tags: z.array(reference("tags")), author: reference("authors"), related: z.array(reference("blogs")), diff --git a/src/layouts/BaseLayout.astro b/src/layouts/BaseLayout.astro index 63bc599..be749dc 100644 --- a/src/layouts/BaseLayout.astro +++ b/src/layouts/BaseLayout.astro @@ -76,7 +76,7 @@ const { title, description, type = "website" } = Astro.props; const payload = {} as Record; data.forEach((value, key) => (payload[key] = value)); - delete payload.agreed + delete payload.agreed; button?.classList.add("hidden"); loader?.classList.remove("hidden"); From 587349eb29e3c8486c96d802f7f5e058906a6d61 Mon Sep 17 00:00:00 2001 From: Louis Date: Thu, 23 May 2024 09:55:24 +0100 Subject: [PATCH 4/5] finish migrating wordpress era blogs --- src/content/authors/peter-maydell.md | 1 + ...ynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx | 2 +- .../blogs/linaro-announces-opendataplane-tigermoth.mdx | 2 +- src/content/blogs/linaro-launches-96boards-ai-platform.mdx | 1 - src/content/blogs/meltdown-spectre.mdx | 1 - .../blogs/softbank-joins-96boards-steering-committee.mdx | 3 --- src/content/blogs/testing-a-trusted-execution-environment.mdx | 2 +- .../blogs/tier-IV-joins-96boards-steering-committee.mdx | 1 - src/lib/cloudinary.ts | 4 +++- 9 files changed, 7 insertions(+), 10 deletions(-) diff --git a/src/content/authors/peter-maydell.md b/src/content/authors/peter-maydell.md index 796e435..d3c16ce 100644 --- a/src/content/authors/peter-maydell.md +++ b/src/content/authors/peter-maydell.md @@ -4,3 +4,4 @@ first_name: Peter last_name: Maydell image: linaro-website/images/author/unknown --- + \ No newline at end of file diff --git a/src/content/blogs/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx b/src/content/blogs/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx index 49145df..87dd3d7 100644 --- a/src/content/blogs/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx +++ b/src/content/blogs/linaro-announces-keynote-speakers-demos-upcoming-linaro-connect-hong-kong.mdx @@ -19,7 +19,7 @@ Linaro, the collaborative engineering organization developing open source softwa * Jon Masters - Chief Arm Architect, Redhat -* Dejan Milojicic - Senior Researcher & Manager, HP Labs +* Dejan Milojicic - Senior Researcher & Manager, HP Labs! * Bob Monkman - Enterprise Segment Marketing Manager, Arm diff --git a/src/content/blogs/linaro-announces-opendataplane-tigermoth.mdx b/src/content/blogs/linaro-announces-opendataplane-tigermoth.mdx index 8c1e002..724dbd5 100644 --- a/src/content/blogs/linaro-announces-opendataplane-tigermoth.mdx +++ b/src/content/blogs/linaro-announces-opendataplane-tigermoth.mdx @@ -150,4 +150,4 @@ been demonstrated by Linaro’s growing membership, and by Linaro consistently b one of the top five company contributors, worldwide, to Linux kernels since 3.10. 
To ensure commercial quality software, Linaro’s work includes comprehensive test and validation on member hardware platforms. The full scope of Linaro engineering work is open to -all online. To find out more, please visit [https://www.linaro.org](/) and [https://www.96Boards.org](https://www.96Boards.org/). +all online. To find out more, please visit https://www.linaro.org and [https://www.96Boards.org](https://www.96Boards.org/). diff --git a/src/content/blogs/linaro-launches-96boards-ai-platform.mdx b/src/content/blogs/linaro-launches-96boards-ai-platform.mdx index b73cd49..b094a55 100644 --- a/src/content/blogs/linaro-launches-96boards-ai-platform.mdx +++ b/src/content/blogs/linaro-launches-96boards-ai-platform.mdx @@ -3,7 +3,6 @@ keywords: Linaro, Connect, HKG18, 96Boards, Artificial Intelligence, AI, Platfor title: Linaro Announces Launch of 96Boards AI Platform description: Linaro announces 96Boards.ai and availability of a range of compatible member 96Boards platforms for developers. Read more here. -image: linaro-website/images/blog/96boards-ai tags: - linaro-connect - ai-ml diff --git a/src/content/blogs/meltdown-spectre.mdx b/src/content/blogs/meltdown-spectre.mdx index 4571edc..f666b2a 100644 --- a/src/content/blogs/meltdown-spectre.mdx +++ b/src/content/blogs/meltdown-spectre.mdx @@ -6,7 +6,6 @@ title: Implications of Meltdown and Spectre : Part 1 description: In this article, Joakim Bech looks at the implications of meltdown & spectre in practice and how it could affect secure domains like TrustZone. Read more here! -image: linaro-website/images/blog/meltdown-spectre-logo tags: - arm author: joakim-bech diff --git a/src/content/blogs/softbank-joins-96boards-steering-committee.mdx b/src/content/blogs/softbank-joins-96boards-steering-committee.mdx index 196c07a..ab8f339 100644 --- a/src/content/blogs/softbank-joins-96boards-steering-committee.mdx +++ b/src/content/blogs/softbank-joins-96boards-steering-committee.mdx @@ -6,7 +6,6 @@ description: Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, announced today that Japanese telecommunications giant SoftBank Corp. has joined the 96Boards initiative as a Steering Committee member. -image: linaro-website/images/blog/softbank-joins-96boards tags: - open-source - arm @@ -25,8 +24,6 @@ The 96Boards steering committee now includes more than twenty companies who are “We are excited to welcome SoftBank to the 96Boards Steering Committee and look forward to  the benefits they will bring to the 96Boards range of specifications and products built around the specifications” said Yang Zhang, Director of 96Boards. “SoftBank is uniquely positioned to help connect the vast range of vendors from across the ecosystem and foster accelerated collaboration on standardized platforms for the future.” -![SoftBank joins 96Boards Steering Committee](/linaro-website/images/blog/softbank-joins-96boards) - ### About SoftBank SoftBank Corp., a subsidiary of SoftBank Group Corp. (TOKYO:9984), provides mobile communication, fixed-line communication and Internet connection services to customers in Japan. Leveraging synergies with other companies in the SoftBank Group, SoftBank Corp. aims to transform lifestyles through ICT and expand into other business areas including IoT, robotics and energy. To learn more, please visit [www.softbank.jp/en/corp/group/sbm/](http://www.softbank.jp/en/corp/group/sbm/). 
diff --git a/src/content/blogs/testing-a-trusted-execution-environment.mdx b/src/content/blogs/testing-a-trusted-execution-environment.mdx index ed1c4ce..22b1a43 100644 --- a/src/content/blogs/testing-a-trusted-execution-environment.mdx +++ b/src/content/blogs/testing-a-trusted-execution-environment.mdx @@ -58,7 +58,7 @@ The host application, which by the way is the one we call “xtest”, has been * [xtest\_10000.c ](https://github.com/OP-TEE/optee_test/tree/master/host/xtest)has test code containing **extensions** going beyond the GlobalPlatform specifications. For example, this is where we are testing key derivation functionality like PBKDF2, HKDF and Concat KDF. -* [xtest\_20000.c ](https://github.com/OP-TEE/optee_test/tree/master/host/xtest)this file also has tests related to storage, but this time those are more aimed at the **secure storage** implementation as such and they verify that files are actually being written to the file system, checking that they haven’t been corrupted and that they are being deleted etc. As an example, when initiating a store operation from secure world there should be file(s) created in Linux and accessible at ***/data/tee/{directory}/{filename}/block.xxx***. +* [xtest\_20000.c ](https://github.com/OP-TEE/optee_test/tree/master/host/xtest)this file also has tests related to storage, but this time those are more aimed at the **secure storage** implementation as such and they verify that files are actually being written to the file system, checking that they haven’t been corrupted and that they are being deleted etc. As an example, when initiating a store operation from secure world there should be file(s) created in Linux and accessible at ***`/data/tee/{directory}/{filename}/block.xxx`***. * [xtest\_benchmark\_1000.c](https://github.com/OP-TEE/optee_test/blob/master/host/xtest/benchmark_1000.c): This is so far the only file related to **benchmarking** and it contains a couple of benchmark tests for the **secure storage** implementation. diff --git a/src/content/blogs/tier-IV-joins-96boards-steering-committee.mdx b/src/content/blogs/tier-IV-joins-96boards-steering-committee.mdx index c11c79f..7ce8af0 100644 --- a/src/content/blogs/tier-IV-joins-96boards-steering-committee.mdx +++ b/src/content/blogs/tier-IV-joins-96boards-steering-committee.mdx @@ -6,7 +6,6 @@ description: Linaro Ltd, the open source collaborative engineering organization developing software for the Arm® ecosystem, announced today that Japan-based intelligent vehicle technology company Tier IV, Inc. has joined the 96Boards initiative as a Steering Committee member. -image: linaro-website/images/blog/96boards-home-page-latest tags: - arm - linux-kernel diff --git a/src/lib/cloudinary.ts b/src/lib/cloudinary.ts index aa6c9c7..5f99a7d 100644 --- a/src/lib/cloudinary.ts +++ b/src/lib/cloudinary.ts @@ -60,7 +60,9 @@ export const getCloudinarySrc = ({ src, ...props }: Props) => { }, }); - const imageSource = src.startsWith("https://") + const parsedSource = src.startsWith("/") ? src.slice(1) : src; + + const imageSource = parsedSource.startsWith("https://") ? 
cloudinaryMedia.image(src).setDeliveryType("fetch") : cloudinaryMedia.image(src); From 6b5ed224d3b5d9799fa62a227e2c79006f3ee98c Mon Sep 17 00:00:00 2001 From: Louis Date: Thu, 23 May 2024 10:08:14 +0100 Subject: [PATCH 5/5] fix meltdown spectre title --- src/content/blogs/meltdown-spectre.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/content/blogs/meltdown-spectre.mdx b/src/content/blogs/meltdown-spectre.mdx index f666b2a..8170ab6 100644 --- a/src/content/blogs/meltdown-spectre.mdx +++ b/src/content/blogs/meltdown-spectre.mdx @@ -2,7 +2,8 @@ keywords: Meltdown, Spectre, Arm, OP-TEE, Trustzone, Speculative execution, branch predictor, CPU cache, Set-Associative-Cache, side channel attack, Simple Power Analysis, Differential Power Analysis, crypto, -title: Implications of Meltdown and Spectre : Part 1 +title: > + Implications of Meltdown and Spectre: Part 1 description: In this article, Joakim Bech looks at the implications of meltdown & spectre in practice and how it could affect secure domains like TrustZone. Read more here!