From a69b0e993f6f84443e5826d338196400742520a9 Mon Sep 17 00:00:00 2001
From: Harish Senthilkumar
Date: Wed, 8 Jan 2025 20:06:47 +0000
Subject: [PATCH] Upgrade to latest NVIDIA NVML package

---
 ecs-init/go.mod | 4 +-
 ecs-init/go.sum | 8 +-
 ecs-init/gpu/generate_mocks.go | 16 +
 ecs-init/gpu/interface.go | 21 +
 ecs-init/gpu/mocks/mock_gpu_device.go | 3420 +++++
 ecs-init/gpu/nvidia_gpu_manager.go | 51 +-
 ecs-init/gpu/nvidia_gpu_manager_test.go | 153 +-
 .../vendor/github.com/NVIDIA/go-nvml/LICENSE | 202 +
 .../github.com/NVIDIA/go-nvml/pkg/dl/dl.go | 117 +
 .../NVIDIA/go-nvml/pkg/dl/dl_linux.go | 26 +
 .../github.com/NVIDIA/go-nvml/pkg/nvml/api.go | 56 +
 .../NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h | 23 +
 .../go-nvml/pkg/nvml/cgo_helpers_static.go | 75 +
 .../NVIDIA/go-nvml/pkg/nvml/const.go | 1538 +++
 .../NVIDIA/go-nvml/pkg/nvml/const_static.go | 27 +
 .../NVIDIA/go-nvml/pkg/nvml/device.go | 3057 +++++
 .../github.com/NVIDIA/go-nvml/pkg/nvml/doc.go | 21 +
 .../go-nvml/pkg/nvml/dynamicLibrary_mock.go | 157 +
 .../NVIDIA/go-nvml/pkg/nvml/event_set.go | 73 +
 .../github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go | 172 +
 .../NVIDIA/go-nvml/pkg/nvml/init.go | 48 +
 .../github.com/NVIDIA/go-nvml/pkg/nvml/lib.go | 291 +
 .../NVIDIA/go-nvml/pkg/nvml/nvml.go | 3310 +++++
 .../github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h | 11101 ++++++++++++++++
 .../NVIDIA/go-nvml/pkg/nvml/refcount.go | 31 +
 .../NVIDIA/go-nvml/pkg/nvml/return.go | 103 +
 .../NVIDIA/go-nvml/pkg/nvml/system.go | 138 +
 .../NVIDIA/go-nvml/pkg/nvml/types_gen.go | 908 ++
 .../NVIDIA/go-nvml/pkg/nvml/unit.go | 113 +
 .../NVIDIA/go-nvml/pkg/nvml/vgpu.go | 480 +
 .../go-nvml/pkg/nvml/zz_generated.api.go | 1007 ++
 .../NVIDIA/gpu-monitoring-tools/LICENSE | 29 -
 .../bindings/go/nvml/bindings.go | 634 -
 .../bindings/go/nvml/nvml.go | 533 -
 .../bindings/go/nvml/nvml.h | 5871 --------
 .../bindings/go/nvml/nvml_dl.c | 46 -
 .../bindings/go/nvml/nvml_dl.h | 15 -
 .../golang/mock/mockgen/model/model.go | 495 +
 .../testify/assert/assertion_compare.go | 28 +-
 .../assert/assertion_compare_can_convert.go | 16 -
 .../assert/assertion_compare_legacy.go | 16 -
 .../testify/assert/assertion_format.go | 32 +-
 .../testify/assert/assertion_forward.go | 59 +-
 .../stretchr/testify/assert/assertions.go | 207 +-
 .../testify/assert/http_assertions.go | 27 +-
 .../stretchr/testify/require/require.go | 65 +-
 .../testify/require/require_forward.go | 59 +-
 ecs-init/vendor/modules.txt | 12 +-
 48 files changed, 27480 insertions(+), 7411 deletions(-)
 create mode 100644 ecs-init/gpu/generate_mocks.go
 create mode 100644 ecs-init/gpu/interface.go
 create mode 100644 ecs-init/gpu/mocks/mock_gpu_device.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/LICENSE
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl_linux.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/api.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers_static.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_static.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/dynamicLibrary_mock.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/lib.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/refcount.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go
 create mode 100644 ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/zz_generated.api.go
 delete mode 100644 ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/LICENSE
 delete mode 100644 ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/bindings.go
 delete mode 100644 ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.go
 delete mode 100644 ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.h
 delete mode 100644 ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.c
 delete mode 100644 ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.h
 create mode 100644 ecs-init/vendor/github.com/golang/mock/mockgen/model/model.go
 delete mode 100644 ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
 delete mode 100644 ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go

diff --git a/ecs-init/go.mod b/ecs-init/go.mod
index eccb5e23331..49f3bc2cecf 100644
--- a/ecs-init/go.mod
+++ b/ecs-init/go.mod
@@ -3,7 +3,7 @@ module github.com/aws/amazon-ecs-agent/ecs-init
 go 1.22
 
 require (
-	github.com/NVIDIA/gpu-monitoring-tools v0.0.0-20180829222009-86f2a9fac6c5
+	github.com/NVIDIA/go-nvml v0.12.4-0
 	github.com/aws/aws-sdk-go-v2 v1.31.0
 	github.com/aws/aws-sdk-go-v2/config v1.27.37
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14
@@ -16,7 +16,7 @@ require (
 	github.com/fsouza/go-dockerclient v1.10.1
 	github.com/golang/mock v1.6.0
 	github.com/pkg/errors v0.9.1
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.9.0
 )
 
 require (
diff --git a/ecs-init/go.sum b/ecs-init/go.sum
index 778eba42dc9..412426c1535 100644
--- a/ecs-init/go.sum
+++ b/ecs-init/go.sum
@@ -6,8 +6,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/Microsoft/hcsshim v0.9.10 h1:TxXGNmcbQxBKVWvjvTocNb6jrPyeHlk5EiDhhgHgggs=
 github.com/Microsoft/hcsshim v0.9.10/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/NVIDIA/gpu-monitoring-tools v0.0.0-20180829222009-86f2a9fac6c5 h1:WLyvLAM0QfjAarRzRTG9EgT5McqGWNZMvqqSUSoyUUY=
-github.com/NVIDIA/gpu-monitoring-tools v0.0.0-20180829222009-86f2a9fac6c5/go.mod h1:nMOvShGpWaf0bXwXmeu4k+O4uziuaEI8pWzIj3BUrOA=
+github.com/NVIDIA/go-nvml v0.12.4-0 h1:4tkbB3pT1O77JGr0gQ6uD8FrsUPqP1A/EOEm2wI1TUg=
+github.com/NVIDIA/go-nvml v0.12.4-0/go.mod h1:8Llmj+1Rr+9VGGwZuRer5N/aCjxGuR5nPb/9ebBiIEQ=
 github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U=
 github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 h1:xDAuZTn4IMm8o1LnBZvmrL8JA1io4o3YWNXgohbf20g=
@@ -113,8 +113,8 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
diff --git a/ecs-init/gpu/generate_mocks.go b/ecs-init/gpu/generate_mocks.go
new file mode 100644
index 00000000000..65814239d0b
--- /dev/null
+++ b/ecs-init/gpu/generate_mocks.go
@@ -0,0 +1,16 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package gpu
+
+//go:generate mockgen -destination=mocks/mock_gpu_device.go github.com/aws/amazon-ecs-agent/ecs-init/gpu GPUDevice
diff --git a/ecs-init/gpu/interface.go b/ecs-init/gpu/interface.go
new file mode 100644
index 00000000000..a64fe7b3e36
--- /dev/null
+++ b/ecs-init/gpu/interface.go
@@ -0,0 +1,21 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package gpu
+
+import "github.com/NVIDIA/go-nvml/pkg/nvml"
+
+// GPUDevice represents the minimal interface required for GPU operations
+type GPUDevice interface {
+	nvml.Device
+}
diff --git a/ecs-init/gpu/mocks/mock_gpu_device.go b/ecs-init/gpu/mocks/mock_gpu_device.go
new file mode 100644
index 00000000000..ada4ba38a00
--- /dev/null
+++ b/ecs-init/gpu/mocks/mock_gpu_device.go
@@ -0,0 +1,3420 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/aws/amazon-ecs-agent/ecs-init/gpu (interfaces: GPUDevice)
+
+// Package mock_gpu is a generated GoMock package.
+package mock_gpu + +import ( + reflect "reflect" + + nvml "github.com/NVIDIA/go-nvml/pkg/nvml" + gomock "github.com/golang/mock/gomock" +) + +// MockGPUDevice is a mock of GPUDevice interface. +type MockGPUDevice struct { + ctrl *gomock.Controller + recorder *MockGPUDeviceMockRecorder +} + +// MockGPUDeviceMockRecorder is the mock recorder for MockGPUDevice. +type MockGPUDeviceMockRecorder struct { + mock *MockGPUDevice +} + +// NewMockGPUDevice creates a new mock instance. +func NewMockGPUDevice(ctrl *gomock.Controller) *MockGPUDevice { + mock := &MockGPUDevice{ctrl: ctrl} + mock.recorder = &MockGPUDeviceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGPUDevice) EXPECT() *MockGPUDeviceMockRecorder { + return m.recorder +} + +// ClearAccountingPids mocks base method. +func (m *MockGPUDevice) ClearAccountingPids() nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClearAccountingPids") + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ClearAccountingPids indicates an expected call of ClearAccountingPids. +func (mr *MockGPUDeviceMockRecorder) ClearAccountingPids() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearAccountingPids", reflect.TypeOf((*MockGPUDevice)(nil).ClearAccountingPids)) +} + +// ClearCpuAffinity mocks base method. +func (m *MockGPUDevice) ClearCpuAffinity() nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClearCpuAffinity") + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ClearCpuAffinity indicates an expected call of ClearCpuAffinity. +func (mr *MockGPUDeviceMockRecorder) ClearCpuAffinity() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearCpuAffinity", reflect.TypeOf((*MockGPUDevice)(nil).ClearCpuAffinity)) +} + +// ClearEccErrorCounts mocks base method. +func (m *MockGPUDevice) ClearEccErrorCounts(arg0 nvml.EccCounterType) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClearEccErrorCounts", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ClearEccErrorCounts indicates an expected call of ClearEccErrorCounts. +func (mr *MockGPUDeviceMockRecorder) ClearEccErrorCounts(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearEccErrorCounts", reflect.TypeOf((*MockGPUDevice)(nil).ClearEccErrorCounts), arg0) +} + +// ClearFieldValues mocks base method. +func (m *MockGPUDevice) ClearFieldValues(arg0 []nvml.FieldValue) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClearFieldValues", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ClearFieldValues indicates an expected call of ClearFieldValues. +func (mr *MockGPUDeviceMockRecorder) ClearFieldValues(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearFieldValues", reflect.TypeOf((*MockGPUDevice)(nil).ClearFieldValues), arg0) +} + +// CreateGpuInstance mocks base method. +func (m *MockGPUDevice) CreateGpuInstance(arg0 *nvml.GpuInstanceProfileInfo) (nvml.GpuInstance, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateGpuInstance", arg0) + ret0, _ := ret[0].(nvml.GpuInstance) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// CreateGpuInstance indicates an expected call of CreateGpuInstance. 
+func (mr *MockGPUDeviceMockRecorder) CreateGpuInstance(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGpuInstance", reflect.TypeOf((*MockGPUDevice)(nil).CreateGpuInstance), arg0) +} + +// CreateGpuInstanceWithPlacement mocks base method. +func (m *MockGPUDevice) CreateGpuInstanceWithPlacement(arg0 *nvml.GpuInstanceProfileInfo, arg1 *nvml.GpuInstancePlacement) (nvml.GpuInstance, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateGpuInstanceWithPlacement", arg0, arg1) + ret0, _ := ret[0].(nvml.GpuInstance) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// CreateGpuInstanceWithPlacement indicates an expected call of CreateGpuInstanceWithPlacement. +func (mr *MockGPUDeviceMockRecorder) CreateGpuInstanceWithPlacement(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGpuInstanceWithPlacement", reflect.TypeOf((*MockGPUDevice)(nil).CreateGpuInstanceWithPlacement), arg0, arg1) +} + +// FreezeNvLinkUtilizationCounter mocks base method. +func (m *MockGPUDevice) FreezeNvLinkUtilizationCounter(arg0, arg1 int, arg2 nvml.EnableState) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FreezeNvLinkUtilizationCounter", arg0, arg1, arg2) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// FreezeNvLinkUtilizationCounter indicates an expected call of FreezeNvLinkUtilizationCounter. +func (mr *MockGPUDeviceMockRecorder) FreezeNvLinkUtilizationCounter(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FreezeNvLinkUtilizationCounter", reflect.TypeOf((*MockGPUDevice)(nil).FreezeNvLinkUtilizationCounter), arg0, arg1, arg2) +} + +// GetAPIRestriction mocks base method. +func (m *MockGPUDevice) GetAPIRestriction(arg0 nvml.RestrictedAPI) (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAPIRestriction", arg0) + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetAPIRestriction indicates an expected call of GetAPIRestriction. +func (mr *MockGPUDeviceMockRecorder) GetAPIRestriction(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAPIRestriction", reflect.TypeOf((*MockGPUDevice)(nil).GetAPIRestriction), arg0) +} + +// GetAccountingBufferSize mocks base method. +func (m *MockGPUDevice) GetAccountingBufferSize() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountingBufferSize") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetAccountingBufferSize indicates an expected call of GetAccountingBufferSize. +func (mr *MockGPUDeviceMockRecorder) GetAccountingBufferSize() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountingBufferSize", reflect.TypeOf((*MockGPUDevice)(nil).GetAccountingBufferSize)) +} + +// GetAccountingMode mocks base method. +func (m *MockGPUDevice) GetAccountingMode() (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountingMode") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetAccountingMode indicates an expected call of GetAccountingMode. 
+func (mr *MockGPUDeviceMockRecorder) GetAccountingMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountingMode", reflect.TypeOf((*MockGPUDevice)(nil).GetAccountingMode)) +} + +// GetAccountingPids mocks base method. +func (m *MockGPUDevice) GetAccountingPids() ([]int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountingPids") + ret0, _ := ret[0].([]int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetAccountingPids indicates an expected call of GetAccountingPids. +func (mr *MockGPUDeviceMockRecorder) GetAccountingPids() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountingPids", reflect.TypeOf((*MockGPUDevice)(nil).GetAccountingPids)) +} + +// GetAccountingStats mocks base method. +func (m *MockGPUDevice) GetAccountingStats(arg0 uint32) (nvml.AccountingStats, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountingStats", arg0) + ret0, _ := ret[0].(nvml.AccountingStats) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetAccountingStats indicates an expected call of GetAccountingStats. +func (mr *MockGPUDeviceMockRecorder) GetAccountingStats(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountingStats", reflect.TypeOf((*MockGPUDevice)(nil).GetAccountingStats), arg0) +} + +// GetActiveVgpus mocks base method. +func (m *MockGPUDevice) GetActiveVgpus() ([]nvml.VgpuInstance, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveVgpus") + ret0, _ := ret[0].([]nvml.VgpuInstance) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetActiveVgpus indicates an expected call of GetActiveVgpus. +func (mr *MockGPUDeviceMockRecorder) GetActiveVgpus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveVgpus", reflect.TypeOf((*MockGPUDevice)(nil).GetActiveVgpus)) +} + +// GetAdaptiveClockInfoStatus mocks base method. +func (m *MockGPUDevice) GetAdaptiveClockInfoStatus() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAdaptiveClockInfoStatus") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetAdaptiveClockInfoStatus indicates an expected call of GetAdaptiveClockInfoStatus. +func (mr *MockGPUDeviceMockRecorder) GetAdaptiveClockInfoStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAdaptiveClockInfoStatus", reflect.TypeOf((*MockGPUDevice)(nil).GetAdaptiveClockInfoStatus)) +} + +// GetApplicationsClock mocks base method. +func (m *MockGPUDevice) GetApplicationsClock(arg0 nvml.ClockType) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetApplicationsClock", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetApplicationsClock indicates an expected call of GetApplicationsClock. +func (mr *MockGPUDeviceMockRecorder) GetApplicationsClock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetApplicationsClock", reflect.TypeOf((*MockGPUDevice)(nil).GetApplicationsClock), arg0) +} + +// GetArchitecture mocks base method. 
+func (m *MockGPUDevice) GetArchitecture() (nvml.DeviceArchitecture, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetArchitecture") + ret0, _ := ret[0].(nvml.DeviceArchitecture) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetArchitecture indicates an expected call of GetArchitecture. +func (mr *MockGPUDeviceMockRecorder) GetArchitecture() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetArchitecture", reflect.TypeOf((*MockGPUDevice)(nil).GetArchitecture)) +} + +// GetAttributes mocks base method. +func (m *MockGPUDevice) GetAttributes() (nvml.DeviceAttributes, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAttributes") + ret0, _ := ret[0].(nvml.DeviceAttributes) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetAttributes indicates an expected call of GetAttributes. +func (mr *MockGPUDeviceMockRecorder) GetAttributes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttributes", reflect.TypeOf((*MockGPUDevice)(nil).GetAttributes)) +} + +// GetAutoBoostedClocksEnabled mocks base method. +func (m *MockGPUDevice) GetAutoBoostedClocksEnabled() (nvml.EnableState, nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAutoBoostedClocksEnabled") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.EnableState) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetAutoBoostedClocksEnabled indicates an expected call of GetAutoBoostedClocksEnabled. +func (mr *MockGPUDeviceMockRecorder) GetAutoBoostedClocksEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAutoBoostedClocksEnabled", reflect.TypeOf((*MockGPUDevice)(nil).GetAutoBoostedClocksEnabled)) +} + +// GetBAR1MemoryInfo mocks base method. +func (m *MockGPUDevice) GetBAR1MemoryInfo() (nvml.BAR1Memory, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBAR1MemoryInfo") + ret0, _ := ret[0].(nvml.BAR1Memory) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetBAR1MemoryInfo indicates an expected call of GetBAR1MemoryInfo. +func (mr *MockGPUDeviceMockRecorder) GetBAR1MemoryInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBAR1MemoryInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetBAR1MemoryInfo)) +} + +// GetBoardId mocks base method. +func (m *MockGPUDevice) GetBoardId() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBoardId") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetBoardId indicates an expected call of GetBoardId. +func (mr *MockGPUDeviceMockRecorder) GetBoardId() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBoardId", reflect.TypeOf((*MockGPUDevice)(nil).GetBoardId)) +} + +// GetBoardPartNumber mocks base method. +func (m *MockGPUDevice) GetBoardPartNumber() (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBoardPartNumber") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetBoardPartNumber indicates an expected call of GetBoardPartNumber. 
+func (mr *MockGPUDeviceMockRecorder) GetBoardPartNumber() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBoardPartNumber", reflect.TypeOf((*MockGPUDevice)(nil).GetBoardPartNumber)) +} + +// GetBrand mocks base method. +func (m *MockGPUDevice) GetBrand() (nvml.BrandType, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBrand") + ret0, _ := ret[0].(nvml.BrandType) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetBrand indicates an expected call of GetBrand. +func (mr *MockGPUDeviceMockRecorder) GetBrand() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBrand", reflect.TypeOf((*MockGPUDevice)(nil).GetBrand)) +} + +// GetBridgeChipInfo mocks base method. +func (m *MockGPUDevice) GetBridgeChipInfo() (nvml.BridgeChipHierarchy, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBridgeChipInfo") + ret0, _ := ret[0].(nvml.BridgeChipHierarchy) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetBridgeChipInfo indicates an expected call of GetBridgeChipInfo. +func (mr *MockGPUDeviceMockRecorder) GetBridgeChipInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBridgeChipInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetBridgeChipInfo)) +} + +// GetBusType mocks base method. +func (m *MockGPUDevice) GetBusType() (nvml.BusType, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBusType") + ret0, _ := ret[0].(nvml.BusType) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetBusType indicates an expected call of GetBusType. +func (mr *MockGPUDeviceMockRecorder) GetBusType() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBusType", reflect.TypeOf((*MockGPUDevice)(nil).GetBusType)) +} + +// GetC2cModeInfoV mocks base method. +func (m *MockGPUDevice) GetC2cModeInfoV() nvml.C2cModeInfoHandler { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetC2cModeInfoV") + ret0, _ := ret[0].(nvml.C2cModeInfoHandler) + return ret0 +} + +// GetC2cModeInfoV indicates an expected call of GetC2cModeInfoV. +func (mr *MockGPUDeviceMockRecorder) GetC2cModeInfoV() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetC2cModeInfoV", reflect.TypeOf((*MockGPUDevice)(nil).GetC2cModeInfoV)) +} + +// GetClkMonStatus mocks base method. +func (m *MockGPUDevice) GetClkMonStatus() (nvml.ClkMonStatus, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClkMonStatus") + ret0, _ := ret[0].(nvml.ClkMonStatus) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetClkMonStatus indicates an expected call of GetClkMonStatus. +func (mr *MockGPUDeviceMockRecorder) GetClkMonStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClkMonStatus", reflect.TypeOf((*MockGPUDevice)(nil).GetClkMonStatus)) +} + +// GetClock mocks base method. +func (m *MockGPUDevice) GetClock(arg0 nvml.ClockType, arg1 nvml.ClockId) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClock", arg0, arg1) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetClock indicates an expected call of GetClock. 
+func (mr *MockGPUDeviceMockRecorder) GetClock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClock", reflect.TypeOf((*MockGPUDevice)(nil).GetClock), arg0, arg1) +} + +// GetClockInfo mocks base method. +func (m *MockGPUDevice) GetClockInfo(arg0 nvml.ClockType) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClockInfo", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetClockInfo indicates an expected call of GetClockInfo. +func (mr *MockGPUDeviceMockRecorder) GetClockInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClockInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetClockInfo), arg0) +} + +// GetComputeInstanceId mocks base method. +func (m *MockGPUDevice) GetComputeInstanceId() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetComputeInstanceId") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetComputeInstanceId indicates an expected call of GetComputeInstanceId. +func (mr *MockGPUDeviceMockRecorder) GetComputeInstanceId() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetComputeInstanceId", reflect.TypeOf((*MockGPUDevice)(nil).GetComputeInstanceId)) +} + +// GetComputeMode mocks base method. +func (m *MockGPUDevice) GetComputeMode() (nvml.ComputeMode, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetComputeMode") + ret0, _ := ret[0].(nvml.ComputeMode) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetComputeMode indicates an expected call of GetComputeMode. +func (mr *MockGPUDeviceMockRecorder) GetComputeMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetComputeMode", reflect.TypeOf((*MockGPUDevice)(nil).GetComputeMode)) +} + +// GetComputeRunningProcesses mocks base method. +func (m *MockGPUDevice) GetComputeRunningProcesses() ([]nvml.ProcessInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetComputeRunningProcesses") + ret0, _ := ret[0].([]nvml.ProcessInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetComputeRunningProcesses indicates an expected call of GetComputeRunningProcesses. +func (mr *MockGPUDeviceMockRecorder) GetComputeRunningProcesses() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetComputeRunningProcesses", reflect.TypeOf((*MockGPUDevice)(nil).GetComputeRunningProcesses)) +} + +// GetConfComputeGpuAttestationReport mocks base method. +func (m *MockGPUDevice) GetConfComputeGpuAttestationReport() (nvml.ConfComputeGpuAttestationReport, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfComputeGpuAttestationReport") + ret0, _ := ret[0].(nvml.ConfComputeGpuAttestationReport) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetConfComputeGpuAttestationReport indicates an expected call of GetConfComputeGpuAttestationReport. +func (mr *MockGPUDeviceMockRecorder) GetConfComputeGpuAttestationReport() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfComputeGpuAttestationReport", reflect.TypeOf((*MockGPUDevice)(nil).GetConfComputeGpuAttestationReport)) +} + +// GetConfComputeGpuCertificate mocks base method. 
+func (m *MockGPUDevice) GetConfComputeGpuCertificate() (nvml.ConfComputeGpuCertificate, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfComputeGpuCertificate") + ret0, _ := ret[0].(nvml.ConfComputeGpuCertificate) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetConfComputeGpuCertificate indicates an expected call of GetConfComputeGpuCertificate. +func (mr *MockGPUDeviceMockRecorder) GetConfComputeGpuCertificate() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfComputeGpuCertificate", reflect.TypeOf((*MockGPUDevice)(nil).GetConfComputeGpuCertificate)) +} + +// GetConfComputeMemSizeInfo mocks base method. +func (m *MockGPUDevice) GetConfComputeMemSizeInfo() (nvml.ConfComputeMemSizeInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfComputeMemSizeInfo") + ret0, _ := ret[0].(nvml.ConfComputeMemSizeInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetConfComputeMemSizeInfo indicates an expected call of GetConfComputeMemSizeInfo. +func (mr *MockGPUDeviceMockRecorder) GetConfComputeMemSizeInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfComputeMemSizeInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetConfComputeMemSizeInfo)) +} + +// GetConfComputeProtectedMemoryUsage mocks base method. +func (m *MockGPUDevice) GetConfComputeProtectedMemoryUsage() (nvml.Memory, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfComputeProtectedMemoryUsage") + ret0, _ := ret[0].(nvml.Memory) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetConfComputeProtectedMemoryUsage indicates an expected call of GetConfComputeProtectedMemoryUsage. +func (mr *MockGPUDeviceMockRecorder) GetConfComputeProtectedMemoryUsage() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfComputeProtectedMemoryUsage", reflect.TypeOf((*MockGPUDevice)(nil).GetConfComputeProtectedMemoryUsage)) +} + +// GetCpuAffinity mocks base method. +func (m *MockGPUDevice) GetCpuAffinity(arg0 int) ([]uint, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCpuAffinity", arg0) + ret0, _ := ret[0].([]uint) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetCpuAffinity indicates an expected call of GetCpuAffinity. +func (mr *MockGPUDeviceMockRecorder) GetCpuAffinity(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCpuAffinity", reflect.TypeOf((*MockGPUDevice)(nil).GetCpuAffinity), arg0) +} + +// GetCpuAffinityWithinScope mocks base method. +func (m *MockGPUDevice) GetCpuAffinityWithinScope(arg0 int, arg1 nvml.AffinityScope) ([]uint, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCpuAffinityWithinScope", arg0, arg1) + ret0, _ := ret[0].([]uint) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetCpuAffinityWithinScope indicates an expected call of GetCpuAffinityWithinScope. +func (mr *MockGPUDeviceMockRecorder) GetCpuAffinityWithinScope(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCpuAffinityWithinScope", reflect.TypeOf((*MockGPUDevice)(nil).GetCpuAffinityWithinScope), arg0, arg1) +} + +// GetCreatableVgpus mocks base method. 
+func (m *MockGPUDevice) GetCreatableVgpus() ([]nvml.VgpuTypeId, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCreatableVgpus") + ret0, _ := ret[0].([]nvml.VgpuTypeId) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetCreatableVgpus indicates an expected call of GetCreatableVgpus. +func (mr *MockGPUDeviceMockRecorder) GetCreatableVgpus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCreatableVgpus", reflect.TypeOf((*MockGPUDevice)(nil).GetCreatableVgpus)) +} + +// GetCudaComputeCapability mocks base method. +func (m *MockGPUDevice) GetCudaComputeCapability() (int, int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCudaComputeCapability") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetCudaComputeCapability indicates an expected call of GetCudaComputeCapability. +func (mr *MockGPUDeviceMockRecorder) GetCudaComputeCapability() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCudaComputeCapability", reflect.TypeOf((*MockGPUDevice)(nil).GetCudaComputeCapability)) +} + +// GetCurrPcieLinkGeneration mocks base method. +func (m *MockGPUDevice) GetCurrPcieLinkGeneration() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrPcieLinkGeneration") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetCurrPcieLinkGeneration indicates an expected call of GetCurrPcieLinkGeneration. +func (mr *MockGPUDeviceMockRecorder) GetCurrPcieLinkGeneration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrPcieLinkGeneration", reflect.TypeOf((*MockGPUDevice)(nil).GetCurrPcieLinkGeneration)) +} + +// GetCurrPcieLinkWidth mocks base method. +func (m *MockGPUDevice) GetCurrPcieLinkWidth() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrPcieLinkWidth") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetCurrPcieLinkWidth indicates an expected call of GetCurrPcieLinkWidth. +func (mr *MockGPUDeviceMockRecorder) GetCurrPcieLinkWidth() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrPcieLinkWidth", reflect.TypeOf((*MockGPUDevice)(nil).GetCurrPcieLinkWidth)) +} + +// GetCurrentClocksEventReasons mocks base method. +func (m *MockGPUDevice) GetCurrentClocksEventReasons() (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentClocksEventReasons") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetCurrentClocksEventReasons indicates an expected call of GetCurrentClocksEventReasons. +func (mr *MockGPUDeviceMockRecorder) GetCurrentClocksEventReasons() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentClocksEventReasons", reflect.TypeOf((*MockGPUDevice)(nil).GetCurrentClocksEventReasons)) +} + +// GetCurrentClocksThrottleReasons mocks base method. +func (m *MockGPUDevice) GetCurrentClocksThrottleReasons() (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentClocksThrottleReasons") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetCurrentClocksThrottleReasons indicates an expected call of GetCurrentClocksThrottleReasons. 
+func (mr *MockGPUDeviceMockRecorder) GetCurrentClocksThrottleReasons() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentClocksThrottleReasons", reflect.TypeOf((*MockGPUDevice)(nil).GetCurrentClocksThrottleReasons)) +} + +// GetDecoderUtilization mocks base method. +func (m *MockGPUDevice) GetDecoderUtilization() (uint32, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDecoderUtilization") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetDecoderUtilization indicates an expected call of GetDecoderUtilization. +func (mr *MockGPUDeviceMockRecorder) GetDecoderUtilization() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDecoderUtilization", reflect.TypeOf((*MockGPUDevice)(nil).GetDecoderUtilization)) +} + +// GetDefaultApplicationsClock mocks base method. +func (m *MockGPUDevice) GetDefaultApplicationsClock(arg0 nvml.ClockType) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDefaultApplicationsClock", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetDefaultApplicationsClock indicates an expected call of GetDefaultApplicationsClock. +func (mr *MockGPUDeviceMockRecorder) GetDefaultApplicationsClock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDefaultApplicationsClock", reflect.TypeOf((*MockGPUDevice)(nil).GetDefaultApplicationsClock), arg0) +} + +// GetDefaultEccMode mocks base method. +func (m *MockGPUDevice) GetDefaultEccMode() (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDefaultEccMode") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetDefaultEccMode indicates an expected call of GetDefaultEccMode. +func (mr *MockGPUDeviceMockRecorder) GetDefaultEccMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDefaultEccMode", reflect.TypeOf((*MockGPUDevice)(nil).GetDefaultEccMode)) +} + +// GetDetailedEccErrors mocks base method. +func (m *MockGPUDevice) GetDetailedEccErrors(arg0 nvml.MemoryErrorType, arg1 nvml.EccCounterType) (nvml.EccErrorCounts, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDetailedEccErrors", arg0, arg1) + ret0, _ := ret[0].(nvml.EccErrorCounts) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetDetailedEccErrors indicates an expected call of GetDetailedEccErrors. +func (mr *MockGPUDeviceMockRecorder) GetDetailedEccErrors(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDetailedEccErrors", reflect.TypeOf((*MockGPUDevice)(nil).GetDetailedEccErrors), arg0, arg1) +} + +// GetDeviceHandleFromMigDeviceHandle mocks base method. +func (m *MockGPUDevice) GetDeviceHandleFromMigDeviceHandle() (nvml.Device, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceHandleFromMigDeviceHandle") + ret0, _ := ret[0].(nvml.Device) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetDeviceHandleFromMigDeviceHandle indicates an expected call of GetDeviceHandleFromMigDeviceHandle. 
+func (mr *MockGPUDeviceMockRecorder) GetDeviceHandleFromMigDeviceHandle() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceHandleFromMigDeviceHandle", reflect.TypeOf((*MockGPUDevice)(nil).GetDeviceHandleFromMigDeviceHandle)) +} + +// GetDisplayActive mocks base method. +func (m *MockGPUDevice) GetDisplayActive() (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDisplayActive") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetDisplayActive indicates an expected call of GetDisplayActive. +func (mr *MockGPUDeviceMockRecorder) GetDisplayActive() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDisplayActive", reflect.TypeOf((*MockGPUDevice)(nil).GetDisplayActive)) +} + +// GetDisplayMode mocks base method. +func (m *MockGPUDevice) GetDisplayMode() (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDisplayMode") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetDisplayMode indicates an expected call of GetDisplayMode. +func (mr *MockGPUDeviceMockRecorder) GetDisplayMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDisplayMode", reflect.TypeOf((*MockGPUDevice)(nil).GetDisplayMode)) +} + +// GetDriverModel mocks base method. +func (m *MockGPUDevice) GetDriverModel() (nvml.DriverModel, nvml.DriverModel, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDriverModel") + ret0, _ := ret[0].(nvml.DriverModel) + ret1, _ := ret[1].(nvml.DriverModel) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetDriverModel indicates an expected call of GetDriverModel. +func (mr *MockGPUDeviceMockRecorder) GetDriverModel() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDriverModel", reflect.TypeOf((*MockGPUDevice)(nil).GetDriverModel)) +} + +// GetDynamicPstatesInfo mocks base method. +func (m *MockGPUDevice) GetDynamicPstatesInfo() (nvml.GpuDynamicPstatesInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDynamicPstatesInfo") + ret0, _ := ret[0].(nvml.GpuDynamicPstatesInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetDynamicPstatesInfo indicates an expected call of GetDynamicPstatesInfo. +func (mr *MockGPUDeviceMockRecorder) GetDynamicPstatesInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDynamicPstatesInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetDynamicPstatesInfo)) +} + +// GetEccMode mocks base method. +func (m *MockGPUDevice) GetEccMode() (nvml.EnableState, nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEccMode") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.EnableState) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetEccMode indicates an expected call of GetEccMode. +func (mr *MockGPUDeviceMockRecorder) GetEccMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEccMode", reflect.TypeOf((*MockGPUDevice)(nil).GetEccMode)) +} + +// GetEncoderCapacity mocks base method. 
+func (m *MockGPUDevice) GetEncoderCapacity(arg0 nvml.EncoderType) (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEncoderCapacity", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetEncoderCapacity indicates an expected call of GetEncoderCapacity. +func (mr *MockGPUDeviceMockRecorder) GetEncoderCapacity(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEncoderCapacity", reflect.TypeOf((*MockGPUDevice)(nil).GetEncoderCapacity), arg0) +} + +// GetEncoderSessions mocks base method. +func (m *MockGPUDevice) GetEncoderSessions() ([]nvml.EncoderSessionInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEncoderSessions") + ret0, _ := ret[0].([]nvml.EncoderSessionInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetEncoderSessions indicates an expected call of GetEncoderSessions. +func (mr *MockGPUDeviceMockRecorder) GetEncoderSessions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEncoderSessions", reflect.TypeOf((*MockGPUDevice)(nil).GetEncoderSessions)) +} + +// GetEncoderStats mocks base method. +func (m *MockGPUDevice) GetEncoderStats() (int, uint32, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEncoderStats") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(uint32) + ret3, _ := ret[3].(nvml.Return) + return ret0, ret1, ret2, ret3 +} + +// GetEncoderStats indicates an expected call of GetEncoderStats. +func (mr *MockGPUDeviceMockRecorder) GetEncoderStats() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEncoderStats", reflect.TypeOf((*MockGPUDevice)(nil).GetEncoderStats)) +} + +// GetEncoderUtilization mocks base method. +func (m *MockGPUDevice) GetEncoderUtilization() (uint32, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEncoderUtilization") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetEncoderUtilization indicates an expected call of GetEncoderUtilization. +func (mr *MockGPUDeviceMockRecorder) GetEncoderUtilization() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEncoderUtilization", reflect.TypeOf((*MockGPUDevice)(nil).GetEncoderUtilization)) +} + +// GetEnforcedPowerLimit mocks base method. +func (m *MockGPUDevice) GetEnforcedPowerLimit() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEnforcedPowerLimit") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetEnforcedPowerLimit indicates an expected call of GetEnforcedPowerLimit. +func (mr *MockGPUDeviceMockRecorder) GetEnforcedPowerLimit() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEnforcedPowerLimit", reflect.TypeOf((*MockGPUDevice)(nil).GetEnforcedPowerLimit)) +} + +// GetFBCSessions mocks base method. +func (m *MockGPUDevice) GetFBCSessions() ([]nvml.FBCSessionInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFBCSessions") + ret0, _ := ret[0].([]nvml.FBCSessionInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetFBCSessions indicates an expected call of GetFBCSessions. 
+func (mr *MockGPUDeviceMockRecorder) GetFBCSessions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFBCSessions", reflect.TypeOf((*MockGPUDevice)(nil).GetFBCSessions)) +} + +// GetFBCStats mocks base method. +func (m *MockGPUDevice) GetFBCStats() (nvml.FBCStats, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFBCStats") + ret0, _ := ret[0].(nvml.FBCStats) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetFBCStats indicates an expected call of GetFBCStats. +func (mr *MockGPUDeviceMockRecorder) GetFBCStats() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFBCStats", reflect.TypeOf((*MockGPUDevice)(nil).GetFBCStats)) +} + +// GetFanControlPolicy_v2 mocks base method. +func (m *MockGPUDevice) GetFanControlPolicy_v2(arg0 int) (nvml.FanControlPolicy, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFanControlPolicy_v2", arg0) + ret0, _ := ret[0].(nvml.FanControlPolicy) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetFanControlPolicy_v2 indicates an expected call of GetFanControlPolicy_v2. +func (mr *MockGPUDeviceMockRecorder) GetFanControlPolicy_v2(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFanControlPolicy_v2", reflect.TypeOf((*MockGPUDevice)(nil).GetFanControlPolicy_v2), arg0) +} + +// GetFanSpeed mocks base method. +func (m *MockGPUDevice) GetFanSpeed() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFanSpeed") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetFanSpeed indicates an expected call of GetFanSpeed. +func (mr *MockGPUDeviceMockRecorder) GetFanSpeed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFanSpeed", reflect.TypeOf((*MockGPUDevice)(nil).GetFanSpeed)) +} + +// GetFanSpeed_v2 mocks base method. +func (m *MockGPUDevice) GetFanSpeed_v2(arg0 int) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFanSpeed_v2", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetFanSpeed_v2 indicates an expected call of GetFanSpeed_v2. +func (mr *MockGPUDeviceMockRecorder) GetFanSpeed_v2(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFanSpeed_v2", reflect.TypeOf((*MockGPUDevice)(nil).GetFanSpeed_v2), arg0) +} + +// GetFieldValues mocks base method. +func (m *MockGPUDevice) GetFieldValues(arg0 []nvml.FieldValue) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFieldValues", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// GetFieldValues indicates an expected call of GetFieldValues. +func (mr *MockGPUDeviceMockRecorder) GetFieldValues(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFieldValues", reflect.TypeOf((*MockGPUDevice)(nil).GetFieldValues), arg0) +} + +// GetGpcClkMinMaxVfOffset mocks base method. +func (m *MockGPUDevice) GetGpcClkMinMaxVfOffset() (int, int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpcClkMinMaxVfOffset") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetGpcClkMinMaxVfOffset indicates an expected call of GetGpcClkMinMaxVfOffset. 
+func (mr *MockGPUDeviceMockRecorder) GetGpcClkMinMaxVfOffset() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpcClkMinMaxVfOffset", reflect.TypeOf((*MockGPUDevice)(nil).GetGpcClkMinMaxVfOffset)) +} + +// GetGpcClkVfOffset mocks base method. +func (m *MockGPUDevice) GetGpcClkVfOffset() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpcClkVfOffset") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpcClkVfOffset indicates an expected call of GetGpcClkVfOffset. +func (mr *MockGPUDeviceMockRecorder) GetGpcClkVfOffset() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpcClkVfOffset", reflect.TypeOf((*MockGPUDevice)(nil).GetGpcClkVfOffset)) +} + +// GetGpuFabricInfo mocks base method. +func (m *MockGPUDevice) GetGpuFabricInfo() (nvml.GpuFabricInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuFabricInfo") + ret0, _ := ret[0].(nvml.GpuFabricInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpuFabricInfo indicates an expected call of GetGpuFabricInfo. +func (mr *MockGPUDeviceMockRecorder) GetGpuFabricInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuFabricInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuFabricInfo)) +} + +// GetGpuFabricInfoV mocks base method. +func (m *MockGPUDevice) GetGpuFabricInfoV() nvml.GpuFabricInfoHandler { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuFabricInfoV") + ret0, _ := ret[0].(nvml.GpuFabricInfoHandler) + return ret0 +} + +// GetGpuFabricInfoV indicates an expected call of GetGpuFabricInfoV. +func (mr *MockGPUDeviceMockRecorder) GetGpuFabricInfoV() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuFabricInfoV", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuFabricInfoV)) +} + +// GetGpuInstanceById mocks base method. +func (m *MockGPUDevice) GetGpuInstanceById(arg0 int) (nvml.GpuInstance, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuInstanceById", arg0) + ret0, _ := ret[0].(nvml.GpuInstance) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpuInstanceById indicates an expected call of GetGpuInstanceById. +func (mr *MockGPUDeviceMockRecorder) GetGpuInstanceById(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuInstanceById", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuInstanceById), arg0) +} + +// GetGpuInstanceId mocks base method. +func (m *MockGPUDevice) GetGpuInstanceId() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuInstanceId") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpuInstanceId indicates an expected call of GetGpuInstanceId. +func (mr *MockGPUDeviceMockRecorder) GetGpuInstanceId() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuInstanceId", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuInstanceId)) +} + +// GetGpuInstancePossiblePlacements mocks base method. 
+func (m *MockGPUDevice) GetGpuInstancePossiblePlacements(arg0 *nvml.GpuInstanceProfileInfo) ([]nvml.GpuInstancePlacement, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuInstancePossiblePlacements", arg0) + ret0, _ := ret[0].([]nvml.GpuInstancePlacement) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpuInstancePossiblePlacements indicates an expected call of GetGpuInstancePossiblePlacements. +func (mr *MockGPUDeviceMockRecorder) GetGpuInstancePossiblePlacements(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuInstancePossiblePlacements", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuInstancePossiblePlacements), arg0) +} + +// GetGpuInstanceProfileInfo mocks base method. +func (m *MockGPUDevice) GetGpuInstanceProfileInfo(arg0 int) (nvml.GpuInstanceProfileInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuInstanceProfileInfo", arg0) + ret0, _ := ret[0].(nvml.GpuInstanceProfileInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpuInstanceProfileInfo indicates an expected call of GetGpuInstanceProfileInfo. +func (mr *MockGPUDeviceMockRecorder) GetGpuInstanceProfileInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuInstanceProfileInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuInstanceProfileInfo), arg0) +} + +// GetGpuInstanceProfileInfoV mocks base method. +func (m *MockGPUDevice) GetGpuInstanceProfileInfoV(arg0 int) nvml.GpuInstanceProfileInfoHandler { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuInstanceProfileInfoV", arg0) + ret0, _ := ret[0].(nvml.GpuInstanceProfileInfoHandler) + return ret0 +} + +// GetGpuInstanceProfileInfoV indicates an expected call of GetGpuInstanceProfileInfoV. +func (mr *MockGPUDeviceMockRecorder) GetGpuInstanceProfileInfoV(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuInstanceProfileInfoV", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuInstanceProfileInfoV), arg0) +} + +// GetGpuInstanceRemainingCapacity mocks base method. +func (m *MockGPUDevice) GetGpuInstanceRemainingCapacity(arg0 *nvml.GpuInstanceProfileInfo) (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuInstanceRemainingCapacity", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpuInstanceRemainingCapacity indicates an expected call of GetGpuInstanceRemainingCapacity. +func (mr *MockGPUDeviceMockRecorder) GetGpuInstanceRemainingCapacity(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuInstanceRemainingCapacity", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuInstanceRemainingCapacity), arg0) +} + +// GetGpuInstances mocks base method. +func (m *MockGPUDevice) GetGpuInstances(arg0 *nvml.GpuInstanceProfileInfo) ([]nvml.GpuInstance, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuInstances", arg0) + ret0, _ := ret[0].([]nvml.GpuInstance) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpuInstances indicates an expected call of GetGpuInstances. 
+func (mr *MockGPUDeviceMockRecorder) GetGpuInstances(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuInstances", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuInstances), arg0) +} + +// GetGpuMaxPcieLinkGeneration mocks base method. +func (m *MockGPUDevice) GetGpuMaxPcieLinkGeneration() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuMaxPcieLinkGeneration") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGpuMaxPcieLinkGeneration indicates an expected call of GetGpuMaxPcieLinkGeneration. +func (mr *MockGPUDeviceMockRecorder) GetGpuMaxPcieLinkGeneration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuMaxPcieLinkGeneration", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuMaxPcieLinkGeneration)) +} + +// GetGpuOperationMode mocks base method. +func (m *MockGPUDevice) GetGpuOperationMode() (nvml.GpuOperationMode, nvml.GpuOperationMode, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGpuOperationMode") + ret0, _ := ret[0].(nvml.GpuOperationMode) + ret1, _ := ret[1].(nvml.GpuOperationMode) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetGpuOperationMode indicates an expected call of GetGpuOperationMode. +func (mr *MockGPUDeviceMockRecorder) GetGpuOperationMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGpuOperationMode", reflect.TypeOf((*MockGPUDevice)(nil).GetGpuOperationMode)) +} + +// GetGraphicsRunningProcesses mocks base method. +func (m *MockGPUDevice) GetGraphicsRunningProcesses() ([]nvml.ProcessInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGraphicsRunningProcesses") + ret0, _ := ret[0].([]nvml.ProcessInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGraphicsRunningProcesses indicates an expected call of GetGraphicsRunningProcesses. +func (mr *MockGPUDeviceMockRecorder) GetGraphicsRunningProcesses() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGraphicsRunningProcesses", reflect.TypeOf((*MockGPUDevice)(nil).GetGraphicsRunningProcesses)) +} + +// GetGridLicensableFeatures mocks base method. +func (m *MockGPUDevice) GetGridLicensableFeatures() (nvml.GridLicensableFeatures, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGridLicensableFeatures") + ret0, _ := ret[0].(nvml.GridLicensableFeatures) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGridLicensableFeatures indicates an expected call of GetGridLicensableFeatures. +func (mr *MockGPUDeviceMockRecorder) GetGridLicensableFeatures() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGridLicensableFeatures", reflect.TypeOf((*MockGPUDevice)(nil).GetGridLicensableFeatures)) +} + +// GetGspFirmwareMode mocks base method. +func (m *MockGPUDevice) GetGspFirmwareMode() (bool, bool, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGspFirmwareMode") + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetGspFirmwareMode indicates an expected call of GetGspFirmwareMode. 
+func (mr *MockGPUDeviceMockRecorder) GetGspFirmwareMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGspFirmwareMode", reflect.TypeOf((*MockGPUDevice)(nil).GetGspFirmwareMode)) +} + +// GetGspFirmwareVersion mocks base method. +func (m *MockGPUDevice) GetGspFirmwareVersion() (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGspFirmwareVersion") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetGspFirmwareVersion indicates an expected call of GetGspFirmwareVersion. +func (mr *MockGPUDeviceMockRecorder) GetGspFirmwareVersion() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGspFirmwareVersion", reflect.TypeOf((*MockGPUDevice)(nil).GetGspFirmwareVersion)) +} + +// GetHostVgpuMode mocks base method. +func (m *MockGPUDevice) GetHostVgpuMode() (nvml.HostVgpuMode, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHostVgpuMode") + ret0, _ := ret[0].(nvml.HostVgpuMode) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetHostVgpuMode indicates an expected call of GetHostVgpuMode. +func (mr *MockGPUDeviceMockRecorder) GetHostVgpuMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostVgpuMode", reflect.TypeOf((*MockGPUDevice)(nil).GetHostVgpuMode)) +} + +// GetIndex mocks base method. +func (m *MockGPUDevice) GetIndex() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIndex") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetIndex indicates an expected call of GetIndex. +func (mr *MockGPUDeviceMockRecorder) GetIndex() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndex", reflect.TypeOf((*MockGPUDevice)(nil).GetIndex)) +} + +// GetInforomConfigurationChecksum mocks base method. +func (m *MockGPUDevice) GetInforomConfigurationChecksum() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInforomConfigurationChecksum") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetInforomConfigurationChecksum indicates an expected call of GetInforomConfigurationChecksum. +func (mr *MockGPUDeviceMockRecorder) GetInforomConfigurationChecksum() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInforomConfigurationChecksum", reflect.TypeOf((*MockGPUDevice)(nil).GetInforomConfigurationChecksum)) +} + +// GetInforomImageVersion mocks base method. +func (m *MockGPUDevice) GetInforomImageVersion() (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInforomImageVersion") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetInforomImageVersion indicates an expected call of GetInforomImageVersion. +func (mr *MockGPUDeviceMockRecorder) GetInforomImageVersion() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInforomImageVersion", reflect.TypeOf((*MockGPUDevice)(nil).GetInforomImageVersion)) +} + +// GetInforomVersion mocks base method. 
+func (m *MockGPUDevice) GetInforomVersion(arg0 nvml.InforomObject) (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInforomVersion", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetInforomVersion indicates an expected call of GetInforomVersion. +func (mr *MockGPUDeviceMockRecorder) GetInforomVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInforomVersion", reflect.TypeOf((*MockGPUDevice)(nil).GetInforomVersion), arg0) +} + +// GetIrqNum mocks base method. +func (m *MockGPUDevice) GetIrqNum() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIrqNum") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetIrqNum indicates an expected call of GetIrqNum. +func (mr *MockGPUDeviceMockRecorder) GetIrqNum() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIrqNum", reflect.TypeOf((*MockGPUDevice)(nil).GetIrqNum)) +} + +// GetJpgUtilization mocks base method. +func (m *MockGPUDevice) GetJpgUtilization() (uint32, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetJpgUtilization") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetJpgUtilization indicates an expected call of GetJpgUtilization. +func (mr *MockGPUDeviceMockRecorder) GetJpgUtilization() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetJpgUtilization", reflect.TypeOf((*MockGPUDevice)(nil).GetJpgUtilization)) +} + +// GetLastBBXFlushTime mocks base method. +func (m *MockGPUDevice) GetLastBBXFlushTime() (uint64, uint, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLastBBXFlushTime") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(uint) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetLastBBXFlushTime indicates an expected call of GetLastBBXFlushTime. +func (mr *MockGPUDeviceMockRecorder) GetLastBBXFlushTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastBBXFlushTime", reflect.TypeOf((*MockGPUDevice)(nil).GetLastBBXFlushTime)) +} + +// GetMPSComputeRunningProcesses mocks base method. +func (m *MockGPUDevice) GetMPSComputeRunningProcesses() ([]nvml.ProcessInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMPSComputeRunningProcesses") + ret0, _ := ret[0].([]nvml.ProcessInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMPSComputeRunningProcesses indicates an expected call of GetMPSComputeRunningProcesses. +func (mr *MockGPUDeviceMockRecorder) GetMPSComputeRunningProcesses() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMPSComputeRunningProcesses", reflect.TypeOf((*MockGPUDevice)(nil).GetMPSComputeRunningProcesses)) +} + +// GetMaxClockInfo mocks base method. +func (m *MockGPUDevice) GetMaxClockInfo(arg0 nvml.ClockType) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMaxClockInfo", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMaxClockInfo indicates an expected call of GetMaxClockInfo. 
+func (mr *MockGPUDeviceMockRecorder) GetMaxClockInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMaxClockInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetMaxClockInfo), arg0) +} + +// GetMaxCustomerBoostClock mocks base method. +func (m *MockGPUDevice) GetMaxCustomerBoostClock(arg0 nvml.ClockType) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMaxCustomerBoostClock", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMaxCustomerBoostClock indicates an expected call of GetMaxCustomerBoostClock. +func (mr *MockGPUDeviceMockRecorder) GetMaxCustomerBoostClock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMaxCustomerBoostClock", reflect.TypeOf((*MockGPUDevice)(nil).GetMaxCustomerBoostClock), arg0) +} + +// GetMaxMigDeviceCount mocks base method. +func (m *MockGPUDevice) GetMaxMigDeviceCount() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMaxMigDeviceCount") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMaxMigDeviceCount indicates an expected call of GetMaxMigDeviceCount. +func (mr *MockGPUDeviceMockRecorder) GetMaxMigDeviceCount() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMaxMigDeviceCount", reflect.TypeOf((*MockGPUDevice)(nil).GetMaxMigDeviceCount)) +} + +// GetMaxPcieLinkGeneration mocks base method. +func (m *MockGPUDevice) GetMaxPcieLinkGeneration() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMaxPcieLinkGeneration") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMaxPcieLinkGeneration indicates an expected call of GetMaxPcieLinkGeneration. +func (mr *MockGPUDeviceMockRecorder) GetMaxPcieLinkGeneration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMaxPcieLinkGeneration", reflect.TypeOf((*MockGPUDevice)(nil).GetMaxPcieLinkGeneration)) +} + +// GetMaxPcieLinkWidth mocks base method. +func (m *MockGPUDevice) GetMaxPcieLinkWidth() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMaxPcieLinkWidth") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMaxPcieLinkWidth indicates an expected call of GetMaxPcieLinkWidth. +func (mr *MockGPUDeviceMockRecorder) GetMaxPcieLinkWidth() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMaxPcieLinkWidth", reflect.TypeOf((*MockGPUDevice)(nil).GetMaxPcieLinkWidth)) +} + +// GetMemClkMinMaxVfOffset mocks base method. +func (m *MockGPUDevice) GetMemClkMinMaxVfOffset() (int, int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMemClkMinMaxVfOffset") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetMemClkMinMaxVfOffset indicates an expected call of GetMemClkMinMaxVfOffset. +func (mr *MockGPUDeviceMockRecorder) GetMemClkMinMaxVfOffset() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemClkMinMaxVfOffset", reflect.TypeOf((*MockGPUDevice)(nil).GetMemClkMinMaxVfOffset)) +} + +// GetMemClkVfOffset mocks base method. 
+func (m *MockGPUDevice) GetMemClkVfOffset() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMemClkVfOffset") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMemClkVfOffset indicates an expected call of GetMemClkVfOffset. +func (mr *MockGPUDeviceMockRecorder) GetMemClkVfOffset() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemClkVfOffset", reflect.TypeOf((*MockGPUDevice)(nil).GetMemClkVfOffset)) +} + +// GetMemoryAffinity mocks base method. +func (m *MockGPUDevice) GetMemoryAffinity(arg0 int, arg1 nvml.AffinityScope) ([]uint, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMemoryAffinity", arg0, arg1) + ret0, _ := ret[0].([]uint) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMemoryAffinity indicates an expected call of GetMemoryAffinity. +func (mr *MockGPUDeviceMockRecorder) GetMemoryAffinity(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemoryAffinity", reflect.TypeOf((*MockGPUDevice)(nil).GetMemoryAffinity), arg0, arg1) +} + +// GetMemoryBusWidth mocks base method. +func (m *MockGPUDevice) GetMemoryBusWidth() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMemoryBusWidth") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMemoryBusWidth indicates an expected call of GetMemoryBusWidth. +func (mr *MockGPUDeviceMockRecorder) GetMemoryBusWidth() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemoryBusWidth", reflect.TypeOf((*MockGPUDevice)(nil).GetMemoryBusWidth)) +} + +// GetMemoryErrorCounter mocks base method. +func (m *MockGPUDevice) GetMemoryErrorCounter(arg0 nvml.MemoryErrorType, arg1 nvml.EccCounterType, arg2 nvml.MemoryLocation) (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMemoryErrorCounter", arg0, arg1, arg2) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMemoryErrorCounter indicates an expected call of GetMemoryErrorCounter. +func (mr *MockGPUDeviceMockRecorder) GetMemoryErrorCounter(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemoryErrorCounter", reflect.TypeOf((*MockGPUDevice)(nil).GetMemoryErrorCounter), arg0, arg1, arg2) +} + +// GetMemoryInfo mocks base method. +func (m *MockGPUDevice) GetMemoryInfo() (nvml.Memory, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMemoryInfo") + ret0, _ := ret[0].(nvml.Memory) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMemoryInfo indicates an expected call of GetMemoryInfo. +func (mr *MockGPUDeviceMockRecorder) GetMemoryInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemoryInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetMemoryInfo)) +} + +// GetMemoryInfo_v2 mocks base method. +func (m *MockGPUDevice) GetMemoryInfo_v2() (nvml.Memory_v2, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMemoryInfo_v2") + ret0, _ := ret[0].(nvml.Memory_v2) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMemoryInfo_v2 indicates an expected call of GetMemoryInfo_v2. 
+func (mr *MockGPUDeviceMockRecorder) GetMemoryInfo_v2() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemoryInfo_v2", reflect.TypeOf((*MockGPUDevice)(nil).GetMemoryInfo_v2)) +} + +// GetMigDeviceHandleByIndex mocks base method. +func (m *MockGPUDevice) GetMigDeviceHandleByIndex(arg0 int) (nvml.Device, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMigDeviceHandleByIndex", arg0) + ret0, _ := ret[0].(nvml.Device) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMigDeviceHandleByIndex indicates an expected call of GetMigDeviceHandleByIndex. +func (mr *MockGPUDeviceMockRecorder) GetMigDeviceHandleByIndex(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMigDeviceHandleByIndex", reflect.TypeOf((*MockGPUDevice)(nil).GetMigDeviceHandleByIndex), arg0) +} + +// GetMigMode mocks base method. +func (m *MockGPUDevice) GetMigMode() (int, int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMigMode") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetMigMode indicates an expected call of GetMigMode. +func (mr *MockGPUDeviceMockRecorder) GetMigMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMigMode", reflect.TypeOf((*MockGPUDevice)(nil).GetMigMode)) +} + +// GetMinMaxClockOfPState mocks base method. +func (m *MockGPUDevice) GetMinMaxClockOfPState(arg0 nvml.ClockType, arg1 nvml.Pstates) (uint32, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMinMaxClockOfPState", arg0, arg1) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetMinMaxClockOfPState indicates an expected call of GetMinMaxClockOfPState. +func (mr *MockGPUDeviceMockRecorder) GetMinMaxClockOfPState(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMinMaxClockOfPState", reflect.TypeOf((*MockGPUDevice)(nil).GetMinMaxClockOfPState), arg0, arg1) +} + +// GetMinMaxFanSpeed mocks base method. +func (m *MockGPUDevice) GetMinMaxFanSpeed() (int, int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMinMaxFanSpeed") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetMinMaxFanSpeed indicates an expected call of GetMinMaxFanSpeed. +func (mr *MockGPUDeviceMockRecorder) GetMinMaxFanSpeed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMinMaxFanSpeed", reflect.TypeOf((*MockGPUDevice)(nil).GetMinMaxFanSpeed)) +} + +// GetMinorNumber mocks base method. +func (m *MockGPUDevice) GetMinorNumber() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMinorNumber") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMinorNumber indicates an expected call of GetMinorNumber. +func (mr *MockGPUDeviceMockRecorder) GetMinorNumber() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMinorNumber", reflect.TypeOf((*MockGPUDevice)(nil).GetMinorNumber)) +} + +// GetModuleId mocks base method. 
+func (m *MockGPUDevice) GetModuleId() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetModuleId") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetModuleId indicates an expected call of GetModuleId. +func (mr *MockGPUDeviceMockRecorder) GetModuleId() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModuleId", reflect.TypeOf((*MockGPUDevice)(nil).GetModuleId)) +} + +// GetMultiGpuBoard mocks base method. +func (m *MockGPUDevice) GetMultiGpuBoard() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultiGpuBoard") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetMultiGpuBoard indicates an expected call of GetMultiGpuBoard. +func (mr *MockGPUDeviceMockRecorder) GetMultiGpuBoard() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultiGpuBoard", reflect.TypeOf((*MockGPUDevice)(nil).GetMultiGpuBoard)) +} + +// GetName mocks base method. +func (m *MockGPUDevice) GetName() (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetName") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetName indicates an expected call of GetName. +func (mr *MockGPUDeviceMockRecorder) GetName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockGPUDevice)(nil).GetName)) +} + +// GetNumFans mocks base method. +func (m *MockGPUDevice) GetNumFans() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNumFans") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNumFans indicates an expected call of GetNumFans. +func (mr *MockGPUDeviceMockRecorder) GetNumFans() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNumFans", reflect.TypeOf((*MockGPUDevice)(nil).GetNumFans)) +} + +// GetNumGpuCores mocks base method. +func (m *MockGPUDevice) GetNumGpuCores() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNumGpuCores") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNumGpuCores indicates an expected call of GetNumGpuCores. +func (mr *MockGPUDeviceMockRecorder) GetNumGpuCores() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNumGpuCores", reflect.TypeOf((*MockGPUDevice)(nil).GetNumGpuCores)) +} + +// GetNumaNodeId mocks base method. +func (m *MockGPUDevice) GetNumaNodeId() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNumaNodeId") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNumaNodeId indicates an expected call of GetNumaNodeId. +func (mr *MockGPUDeviceMockRecorder) GetNumaNodeId() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNumaNodeId", reflect.TypeOf((*MockGPUDevice)(nil).GetNumaNodeId)) +} + +// GetNvLinkCapability mocks base method. +func (m *MockGPUDevice) GetNvLinkCapability(arg0 int, arg1 nvml.NvLinkCapability) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNvLinkCapability", arg0, arg1) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNvLinkCapability indicates an expected call of GetNvLinkCapability. 
+func (mr *MockGPUDeviceMockRecorder) GetNvLinkCapability(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNvLinkCapability", reflect.TypeOf((*MockGPUDevice)(nil).GetNvLinkCapability), arg0, arg1) +} + +// GetNvLinkErrorCounter mocks base method. +func (m *MockGPUDevice) GetNvLinkErrorCounter(arg0 int, arg1 nvml.NvLinkErrorCounter) (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNvLinkErrorCounter", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNvLinkErrorCounter indicates an expected call of GetNvLinkErrorCounter. +func (mr *MockGPUDeviceMockRecorder) GetNvLinkErrorCounter(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNvLinkErrorCounter", reflect.TypeOf((*MockGPUDevice)(nil).GetNvLinkErrorCounter), arg0, arg1) +} + +// GetNvLinkRemoteDeviceType mocks base method. +func (m *MockGPUDevice) GetNvLinkRemoteDeviceType(arg0 int) (nvml.IntNvLinkDeviceType, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNvLinkRemoteDeviceType", arg0) + ret0, _ := ret[0].(nvml.IntNvLinkDeviceType) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNvLinkRemoteDeviceType indicates an expected call of GetNvLinkRemoteDeviceType. +func (mr *MockGPUDeviceMockRecorder) GetNvLinkRemoteDeviceType(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNvLinkRemoteDeviceType", reflect.TypeOf((*MockGPUDevice)(nil).GetNvLinkRemoteDeviceType), arg0) +} + +// GetNvLinkRemotePciInfo mocks base method. +func (m *MockGPUDevice) GetNvLinkRemotePciInfo(arg0 int) (nvml.PciInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNvLinkRemotePciInfo", arg0) + ret0, _ := ret[0].(nvml.PciInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNvLinkRemotePciInfo indicates an expected call of GetNvLinkRemotePciInfo. +func (mr *MockGPUDeviceMockRecorder) GetNvLinkRemotePciInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNvLinkRemotePciInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetNvLinkRemotePciInfo), arg0) +} + +// GetNvLinkState mocks base method. +func (m *MockGPUDevice) GetNvLinkState(arg0 int) (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNvLinkState", arg0) + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNvLinkState indicates an expected call of GetNvLinkState. +func (mr *MockGPUDeviceMockRecorder) GetNvLinkState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNvLinkState", reflect.TypeOf((*MockGPUDevice)(nil).GetNvLinkState), arg0) +} + +// GetNvLinkUtilizationControl mocks base method. +func (m *MockGPUDevice) GetNvLinkUtilizationControl(arg0, arg1 int) (nvml.NvLinkUtilizationControl, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNvLinkUtilizationControl", arg0, arg1) + ret0, _ := ret[0].(nvml.NvLinkUtilizationControl) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNvLinkUtilizationControl indicates an expected call of GetNvLinkUtilizationControl. 
+func (mr *MockGPUDeviceMockRecorder) GetNvLinkUtilizationControl(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNvLinkUtilizationControl", reflect.TypeOf((*MockGPUDevice)(nil).GetNvLinkUtilizationControl), arg0, arg1) +} + +// GetNvLinkUtilizationCounter mocks base method. +func (m *MockGPUDevice) GetNvLinkUtilizationCounter(arg0, arg1 int) (uint64, uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNvLinkUtilizationCounter", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(uint64) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetNvLinkUtilizationCounter indicates an expected call of GetNvLinkUtilizationCounter. +func (mr *MockGPUDeviceMockRecorder) GetNvLinkUtilizationCounter(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNvLinkUtilizationCounter", reflect.TypeOf((*MockGPUDevice)(nil).GetNvLinkUtilizationCounter), arg0, arg1) +} + +// GetNvLinkVersion mocks base method. +func (m *MockGPUDevice) GetNvLinkVersion(arg0 int) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNvLinkVersion", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetNvLinkVersion indicates an expected call of GetNvLinkVersion. +func (mr *MockGPUDeviceMockRecorder) GetNvLinkVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNvLinkVersion", reflect.TypeOf((*MockGPUDevice)(nil).GetNvLinkVersion), arg0) +} + +// GetOfaUtilization mocks base method. +func (m *MockGPUDevice) GetOfaUtilization() (uint32, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOfaUtilization") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetOfaUtilization indicates an expected call of GetOfaUtilization. +func (mr *MockGPUDeviceMockRecorder) GetOfaUtilization() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOfaUtilization", reflect.TypeOf((*MockGPUDevice)(nil).GetOfaUtilization)) +} + +// GetP2PStatus mocks base method. +func (m *MockGPUDevice) GetP2PStatus(arg0 nvml.Device, arg1 nvml.GpuP2PCapsIndex) (nvml.GpuP2PStatus, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetP2PStatus", arg0, arg1) + ret0, _ := ret[0].(nvml.GpuP2PStatus) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetP2PStatus indicates an expected call of GetP2PStatus. +func (mr *MockGPUDeviceMockRecorder) GetP2PStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetP2PStatus", reflect.TypeOf((*MockGPUDevice)(nil).GetP2PStatus), arg0, arg1) +} + +// GetPciInfo mocks base method. +func (m *MockGPUDevice) GetPciInfo() (nvml.PciInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPciInfo") + ret0, _ := ret[0].(nvml.PciInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPciInfo indicates an expected call of GetPciInfo. +func (mr *MockGPUDeviceMockRecorder) GetPciInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPciInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetPciInfo)) +} + +// GetPciInfoExt mocks base method. 
+func (m *MockGPUDevice) GetPciInfoExt() (nvml.PciInfoExt, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPciInfoExt") + ret0, _ := ret[0].(nvml.PciInfoExt) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPciInfoExt indicates an expected call of GetPciInfoExt. +func (mr *MockGPUDeviceMockRecorder) GetPciInfoExt() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPciInfoExt", reflect.TypeOf((*MockGPUDevice)(nil).GetPciInfoExt)) +} + +// GetPcieLinkMaxSpeed mocks base method. +func (m *MockGPUDevice) GetPcieLinkMaxSpeed() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPcieLinkMaxSpeed") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPcieLinkMaxSpeed indicates an expected call of GetPcieLinkMaxSpeed. +func (mr *MockGPUDeviceMockRecorder) GetPcieLinkMaxSpeed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPcieLinkMaxSpeed", reflect.TypeOf((*MockGPUDevice)(nil).GetPcieLinkMaxSpeed)) +} + +// GetPcieReplayCounter mocks base method. +func (m *MockGPUDevice) GetPcieReplayCounter() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPcieReplayCounter") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPcieReplayCounter indicates an expected call of GetPcieReplayCounter. +func (mr *MockGPUDeviceMockRecorder) GetPcieReplayCounter() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPcieReplayCounter", reflect.TypeOf((*MockGPUDevice)(nil).GetPcieReplayCounter)) +} + +// GetPcieSpeed mocks base method. +func (m *MockGPUDevice) GetPcieSpeed() (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPcieSpeed") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPcieSpeed indicates an expected call of GetPcieSpeed. +func (mr *MockGPUDeviceMockRecorder) GetPcieSpeed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPcieSpeed", reflect.TypeOf((*MockGPUDevice)(nil).GetPcieSpeed)) +} + +// GetPcieThroughput mocks base method. +func (m *MockGPUDevice) GetPcieThroughput(arg0 nvml.PcieUtilCounter) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPcieThroughput", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPcieThroughput indicates an expected call of GetPcieThroughput. +func (mr *MockGPUDeviceMockRecorder) GetPcieThroughput(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPcieThroughput", reflect.TypeOf((*MockGPUDevice)(nil).GetPcieThroughput), arg0) +} + +// GetPerformanceState mocks base method. +func (m *MockGPUDevice) GetPerformanceState() (nvml.Pstates, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPerformanceState") + ret0, _ := ret[0].(nvml.Pstates) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPerformanceState indicates an expected call of GetPerformanceState. +func (mr *MockGPUDeviceMockRecorder) GetPerformanceState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPerformanceState", reflect.TypeOf((*MockGPUDevice)(nil).GetPerformanceState)) +} + +// GetPersistenceMode mocks base method. 
+func (m *MockGPUDevice) GetPersistenceMode() (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistenceMode") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPersistenceMode indicates an expected call of GetPersistenceMode. +func (mr *MockGPUDeviceMockRecorder) GetPersistenceMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistenceMode", reflect.TypeOf((*MockGPUDevice)(nil).GetPersistenceMode)) +} + +// GetPgpuMetadataString mocks base method. +func (m *MockGPUDevice) GetPgpuMetadataString() (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPgpuMetadataString") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPgpuMetadataString indicates an expected call of GetPgpuMetadataString. +func (mr *MockGPUDeviceMockRecorder) GetPgpuMetadataString() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPgpuMetadataString", reflect.TypeOf((*MockGPUDevice)(nil).GetPgpuMetadataString)) +} + +// GetPowerManagementDefaultLimit mocks base method. +func (m *MockGPUDevice) GetPowerManagementDefaultLimit() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerManagementDefaultLimit") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPowerManagementDefaultLimit indicates an expected call of GetPowerManagementDefaultLimit. +func (mr *MockGPUDeviceMockRecorder) GetPowerManagementDefaultLimit() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerManagementDefaultLimit", reflect.TypeOf((*MockGPUDevice)(nil).GetPowerManagementDefaultLimit)) +} + +// GetPowerManagementLimit mocks base method. +func (m *MockGPUDevice) GetPowerManagementLimit() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerManagementLimit") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPowerManagementLimit indicates an expected call of GetPowerManagementLimit. +func (mr *MockGPUDeviceMockRecorder) GetPowerManagementLimit() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerManagementLimit", reflect.TypeOf((*MockGPUDevice)(nil).GetPowerManagementLimit)) +} + +// GetPowerManagementLimitConstraints mocks base method. +func (m *MockGPUDevice) GetPowerManagementLimitConstraints() (uint32, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerManagementLimitConstraints") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetPowerManagementLimitConstraints indicates an expected call of GetPowerManagementLimitConstraints. +func (mr *MockGPUDeviceMockRecorder) GetPowerManagementLimitConstraints() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerManagementLimitConstraints", reflect.TypeOf((*MockGPUDevice)(nil).GetPowerManagementLimitConstraints)) +} + +// GetPowerManagementMode mocks base method. 
+func (m *MockGPUDevice) GetPowerManagementMode() (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerManagementMode") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPowerManagementMode indicates an expected call of GetPowerManagementMode. +func (mr *MockGPUDeviceMockRecorder) GetPowerManagementMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerManagementMode", reflect.TypeOf((*MockGPUDevice)(nil).GetPowerManagementMode)) +} + +// GetPowerSource mocks base method. +func (m *MockGPUDevice) GetPowerSource() (nvml.PowerSource, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerSource") + ret0, _ := ret[0].(nvml.PowerSource) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPowerSource indicates an expected call of GetPowerSource. +func (mr *MockGPUDeviceMockRecorder) GetPowerSource() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerSource", reflect.TypeOf((*MockGPUDevice)(nil).GetPowerSource)) +} + +// GetPowerState mocks base method. +func (m *MockGPUDevice) GetPowerState() (nvml.Pstates, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerState") + ret0, _ := ret[0].(nvml.Pstates) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPowerState indicates an expected call of GetPowerState. +func (mr *MockGPUDeviceMockRecorder) GetPowerState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerState", reflect.TypeOf((*MockGPUDevice)(nil).GetPowerState)) +} + +// GetPowerUsage mocks base method. +func (m *MockGPUDevice) GetPowerUsage() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerUsage") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetPowerUsage indicates an expected call of GetPowerUsage. +func (mr *MockGPUDeviceMockRecorder) GetPowerUsage() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerUsage", reflect.TypeOf((*MockGPUDevice)(nil).GetPowerUsage)) +} + +// GetProcessUtilization mocks base method. +func (m *MockGPUDevice) GetProcessUtilization(arg0 uint64) ([]nvml.ProcessUtilizationSample, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProcessUtilization", arg0) + ret0, _ := ret[0].([]nvml.ProcessUtilizationSample) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetProcessUtilization indicates an expected call of GetProcessUtilization. +func (mr *MockGPUDeviceMockRecorder) GetProcessUtilization(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProcessUtilization", reflect.TypeOf((*MockGPUDevice)(nil).GetProcessUtilization), arg0) +} + +// GetProcessesUtilizationInfo mocks base method. +func (m *MockGPUDevice) GetProcessesUtilizationInfo() (nvml.ProcessesUtilizationInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProcessesUtilizationInfo") + ret0, _ := ret[0].(nvml.ProcessesUtilizationInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetProcessesUtilizationInfo indicates an expected call of GetProcessesUtilizationInfo. 
+func (mr *MockGPUDeviceMockRecorder) GetProcessesUtilizationInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProcessesUtilizationInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetProcessesUtilizationInfo)) +} + +// GetRemappedRows mocks base method. +func (m *MockGPUDevice) GetRemappedRows() (int, int, bool, bool, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRemappedRows") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(bool) + ret3, _ := ret[3].(bool) + ret4, _ := ret[4].(nvml.Return) + return ret0, ret1, ret2, ret3, ret4 +} + +// GetRemappedRows indicates an expected call of GetRemappedRows. +func (mr *MockGPUDeviceMockRecorder) GetRemappedRows() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRemappedRows", reflect.TypeOf((*MockGPUDevice)(nil).GetRemappedRows)) +} + +// GetRetiredPages mocks base method. +func (m *MockGPUDevice) GetRetiredPages(arg0 nvml.PageRetirementCause) ([]uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRetiredPages", arg0) + ret0, _ := ret[0].([]uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetRetiredPages indicates an expected call of GetRetiredPages. +func (mr *MockGPUDeviceMockRecorder) GetRetiredPages(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRetiredPages", reflect.TypeOf((*MockGPUDevice)(nil).GetRetiredPages), arg0) +} + +// GetRetiredPagesPendingStatus mocks base method. +func (m *MockGPUDevice) GetRetiredPagesPendingStatus() (nvml.EnableState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRetiredPagesPendingStatus") + ret0, _ := ret[0].(nvml.EnableState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetRetiredPagesPendingStatus indicates an expected call of GetRetiredPagesPendingStatus. +func (mr *MockGPUDeviceMockRecorder) GetRetiredPagesPendingStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRetiredPagesPendingStatus", reflect.TypeOf((*MockGPUDevice)(nil).GetRetiredPagesPendingStatus)) +} + +// GetRetiredPages_v2 mocks base method. +func (m *MockGPUDevice) GetRetiredPages_v2(arg0 nvml.PageRetirementCause) ([]uint64, []uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRetiredPages_v2", arg0) + ret0, _ := ret[0].([]uint64) + ret1, _ := ret[1].([]uint64) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetRetiredPages_v2 indicates an expected call of GetRetiredPages_v2. +func (mr *MockGPUDeviceMockRecorder) GetRetiredPages_v2(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRetiredPages_v2", reflect.TypeOf((*MockGPUDevice)(nil).GetRetiredPages_v2), arg0) +} + +// GetRowRemapperHistogram mocks base method. +func (m *MockGPUDevice) GetRowRemapperHistogram() (nvml.RowRemapperHistogramValues, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRowRemapperHistogram") + ret0, _ := ret[0].(nvml.RowRemapperHistogramValues) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetRowRemapperHistogram indicates an expected call of GetRowRemapperHistogram. 
+func (mr *MockGPUDeviceMockRecorder) GetRowRemapperHistogram() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRowRemapperHistogram", reflect.TypeOf((*MockGPUDevice)(nil).GetRowRemapperHistogram)) +} + +// GetRunningProcessDetailList mocks base method. +func (m *MockGPUDevice) GetRunningProcessDetailList() (nvml.ProcessDetailList, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRunningProcessDetailList") + ret0, _ := ret[0].(nvml.ProcessDetailList) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetRunningProcessDetailList indicates an expected call of GetRunningProcessDetailList. +func (mr *MockGPUDeviceMockRecorder) GetRunningProcessDetailList() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRunningProcessDetailList", reflect.TypeOf((*MockGPUDevice)(nil).GetRunningProcessDetailList)) +} + +// GetSamples mocks base method. +func (m *MockGPUDevice) GetSamples(arg0 nvml.SamplingType, arg1 uint64) (nvml.ValueType, []nvml.Sample, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSamples", arg0, arg1) + ret0, _ := ret[0].(nvml.ValueType) + ret1, _ := ret[1].([]nvml.Sample) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetSamples indicates an expected call of GetSamples. +func (mr *MockGPUDeviceMockRecorder) GetSamples(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSamples", reflect.TypeOf((*MockGPUDevice)(nil).GetSamples), arg0, arg1) +} + +// GetSerial mocks base method. +func (m *MockGPUDevice) GetSerial() (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSerial") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetSerial indicates an expected call of GetSerial. +func (mr *MockGPUDeviceMockRecorder) GetSerial() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSerial", reflect.TypeOf((*MockGPUDevice)(nil).GetSerial)) +} + +// GetSramEccErrorStatus mocks base method. +func (m *MockGPUDevice) GetSramEccErrorStatus() (nvml.EccSramErrorStatus, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSramEccErrorStatus") + ret0, _ := ret[0].(nvml.EccSramErrorStatus) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetSramEccErrorStatus indicates an expected call of GetSramEccErrorStatus. +func (mr *MockGPUDeviceMockRecorder) GetSramEccErrorStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSramEccErrorStatus", reflect.TypeOf((*MockGPUDevice)(nil).GetSramEccErrorStatus)) +} + +// GetSupportedClocksEventReasons mocks base method. +func (m *MockGPUDevice) GetSupportedClocksEventReasons() (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSupportedClocksEventReasons") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetSupportedClocksEventReasons indicates an expected call of GetSupportedClocksEventReasons. +func (mr *MockGPUDeviceMockRecorder) GetSupportedClocksEventReasons() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedClocksEventReasons", reflect.TypeOf((*MockGPUDevice)(nil).GetSupportedClocksEventReasons)) +} + +// GetSupportedClocksThrottleReasons mocks base method. 
+func (m *MockGPUDevice) GetSupportedClocksThrottleReasons() (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSupportedClocksThrottleReasons") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetSupportedClocksThrottleReasons indicates an expected call of GetSupportedClocksThrottleReasons. +func (mr *MockGPUDeviceMockRecorder) GetSupportedClocksThrottleReasons() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedClocksThrottleReasons", reflect.TypeOf((*MockGPUDevice)(nil).GetSupportedClocksThrottleReasons)) +} + +// GetSupportedEventTypes mocks base method. +func (m *MockGPUDevice) GetSupportedEventTypes() (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSupportedEventTypes") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetSupportedEventTypes indicates an expected call of GetSupportedEventTypes. +func (mr *MockGPUDeviceMockRecorder) GetSupportedEventTypes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedEventTypes", reflect.TypeOf((*MockGPUDevice)(nil).GetSupportedEventTypes)) +} + +// GetSupportedGraphicsClocks mocks base method. +func (m *MockGPUDevice) GetSupportedGraphicsClocks(arg0 int) (int, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSupportedGraphicsClocks", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetSupportedGraphicsClocks indicates an expected call of GetSupportedGraphicsClocks. +func (mr *MockGPUDeviceMockRecorder) GetSupportedGraphicsClocks(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedGraphicsClocks", reflect.TypeOf((*MockGPUDevice)(nil).GetSupportedGraphicsClocks), arg0) +} + +// GetSupportedMemoryClocks mocks base method. +func (m *MockGPUDevice) GetSupportedMemoryClocks() (int, uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSupportedMemoryClocks") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetSupportedMemoryClocks indicates an expected call of GetSupportedMemoryClocks. +func (mr *MockGPUDeviceMockRecorder) GetSupportedMemoryClocks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedMemoryClocks", reflect.TypeOf((*MockGPUDevice)(nil).GetSupportedMemoryClocks)) +} + +// GetSupportedPerformanceStates mocks base method. +func (m *MockGPUDevice) GetSupportedPerformanceStates() ([]nvml.Pstates, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSupportedPerformanceStates") + ret0, _ := ret[0].([]nvml.Pstates) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetSupportedPerformanceStates indicates an expected call of GetSupportedPerformanceStates. +func (mr *MockGPUDeviceMockRecorder) GetSupportedPerformanceStates() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedPerformanceStates", reflect.TypeOf((*MockGPUDevice)(nil).GetSupportedPerformanceStates)) +} + +// GetSupportedVgpus mocks base method. 
+func (m *MockGPUDevice) GetSupportedVgpus() ([]nvml.VgpuTypeId, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSupportedVgpus") + ret0, _ := ret[0].([]nvml.VgpuTypeId) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetSupportedVgpus indicates an expected call of GetSupportedVgpus. +func (mr *MockGPUDeviceMockRecorder) GetSupportedVgpus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedVgpus", reflect.TypeOf((*MockGPUDevice)(nil).GetSupportedVgpus)) +} + +// GetTargetFanSpeed mocks base method. +func (m *MockGPUDevice) GetTargetFanSpeed(arg0 int) (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTargetFanSpeed", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetTargetFanSpeed indicates an expected call of GetTargetFanSpeed. +func (mr *MockGPUDeviceMockRecorder) GetTargetFanSpeed(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTargetFanSpeed", reflect.TypeOf((*MockGPUDevice)(nil).GetTargetFanSpeed), arg0) +} + +// GetTemperature mocks base method. +func (m *MockGPUDevice) GetTemperature(arg0 nvml.TemperatureSensors) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTemperature", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetTemperature indicates an expected call of GetTemperature. +func (mr *MockGPUDeviceMockRecorder) GetTemperature(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTemperature", reflect.TypeOf((*MockGPUDevice)(nil).GetTemperature), arg0) +} + +// GetTemperatureThreshold mocks base method. +func (m *MockGPUDevice) GetTemperatureThreshold(arg0 nvml.TemperatureThresholds) (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTemperatureThreshold", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetTemperatureThreshold indicates an expected call of GetTemperatureThreshold. +func (mr *MockGPUDeviceMockRecorder) GetTemperatureThreshold(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTemperatureThreshold", reflect.TypeOf((*MockGPUDevice)(nil).GetTemperatureThreshold), arg0) +} + +// GetThermalSettings mocks base method. +func (m *MockGPUDevice) GetThermalSettings(arg0 uint32) (nvml.GpuThermalSettings, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetThermalSettings", arg0) + ret0, _ := ret[0].(nvml.GpuThermalSettings) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetThermalSettings indicates an expected call of GetThermalSettings. +func (mr *MockGPUDeviceMockRecorder) GetThermalSettings(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetThermalSettings", reflect.TypeOf((*MockGPUDevice)(nil).GetThermalSettings), arg0) +} + +// GetTopologyCommonAncestor mocks base method. +func (m *MockGPUDevice) GetTopologyCommonAncestor(arg0 nvml.Device) (nvml.GpuTopologyLevel, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTopologyCommonAncestor", arg0) + ret0, _ := ret[0].(nvml.GpuTopologyLevel) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetTopologyCommonAncestor indicates an expected call of GetTopologyCommonAncestor. 
+func (mr *MockGPUDeviceMockRecorder) GetTopologyCommonAncestor(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopologyCommonAncestor", reflect.TypeOf((*MockGPUDevice)(nil).GetTopologyCommonAncestor), arg0) +} + +// GetTopologyNearestGpus mocks base method. +func (m *MockGPUDevice) GetTopologyNearestGpus(arg0 nvml.GpuTopologyLevel) ([]nvml.Device, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTopologyNearestGpus", arg0) + ret0, _ := ret[0].([]nvml.Device) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetTopologyNearestGpus indicates an expected call of GetTopologyNearestGpus. +func (mr *MockGPUDeviceMockRecorder) GetTopologyNearestGpus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopologyNearestGpus", reflect.TypeOf((*MockGPUDevice)(nil).GetTopologyNearestGpus), arg0) +} + +// GetTotalEccErrors mocks base method. +func (m *MockGPUDevice) GetTotalEccErrors(arg0 nvml.MemoryErrorType, arg1 nvml.EccCounterType) (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTotalEccErrors", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetTotalEccErrors indicates an expected call of GetTotalEccErrors. +func (mr *MockGPUDeviceMockRecorder) GetTotalEccErrors(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTotalEccErrors", reflect.TypeOf((*MockGPUDevice)(nil).GetTotalEccErrors), arg0, arg1) +} + +// GetTotalEnergyConsumption mocks base method. +func (m *MockGPUDevice) GetTotalEnergyConsumption() (uint64, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTotalEnergyConsumption") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetTotalEnergyConsumption indicates an expected call of GetTotalEnergyConsumption. +func (mr *MockGPUDeviceMockRecorder) GetTotalEnergyConsumption() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTotalEnergyConsumption", reflect.TypeOf((*MockGPUDevice)(nil).GetTotalEnergyConsumption)) +} + +// GetUUID mocks base method. +func (m *MockGPUDevice) GetUUID() (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUUID") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetUUID indicates an expected call of GetUUID. +func (mr *MockGPUDeviceMockRecorder) GetUUID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUUID", reflect.TypeOf((*MockGPUDevice)(nil).GetUUID)) +} + +// GetUtilizationRates mocks base method. +func (m *MockGPUDevice) GetUtilizationRates() (nvml.Utilization, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUtilizationRates") + ret0, _ := ret[0].(nvml.Utilization) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetUtilizationRates indicates an expected call of GetUtilizationRates. +func (mr *MockGPUDeviceMockRecorder) GetUtilizationRates() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUtilizationRates", reflect.TypeOf((*MockGPUDevice)(nil).GetUtilizationRates)) +} + +// GetVbiosVersion mocks base method. 
+func (m *MockGPUDevice) GetVbiosVersion() (string, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVbiosVersion") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVbiosVersion indicates an expected call of GetVbiosVersion. +func (mr *MockGPUDeviceMockRecorder) GetVbiosVersion() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVbiosVersion", reflect.TypeOf((*MockGPUDevice)(nil).GetVbiosVersion)) +} + +// GetVgpuCapabilities mocks base method. +func (m *MockGPUDevice) GetVgpuCapabilities(arg0 nvml.DeviceVgpuCapability) (bool, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuCapabilities", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuCapabilities indicates an expected call of GetVgpuCapabilities. +func (mr *MockGPUDeviceMockRecorder) GetVgpuCapabilities(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuCapabilities", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuCapabilities), arg0) +} + +// GetVgpuHeterogeneousMode mocks base method. +func (m *MockGPUDevice) GetVgpuHeterogeneousMode() (nvml.VgpuHeterogeneousMode, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuHeterogeneousMode") + ret0, _ := ret[0].(nvml.VgpuHeterogeneousMode) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuHeterogeneousMode indicates an expected call of GetVgpuHeterogeneousMode. +func (mr *MockGPUDeviceMockRecorder) GetVgpuHeterogeneousMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuHeterogeneousMode", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuHeterogeneousMode)) +} + +// GetVgpuInstancesUtilizationInfo mocks base method. +func (m *MockGPUDevice) GetVgpuInstancesUtilizationInfo() (nvml.VgpuInstancesUtilizationInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuInstancesUtilizationInfo") + ret0, _ := ret[0].(nvml.VgpuInstancesUtilizationInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuInstancesUtilizationInfo indicates an expected call of GetVgpuInstancesUtilizationInfo. +func (mr *MockGPUDeviceMockRecorder) GetVgpuInstancesUtilizationInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuInstancesUtilizationInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuInstancesUtilizationInfo)) +} + +// GetVgpuMetadata mocks base method. +func (m *MockGPUDevice) GetVgpuMetadata() (nvml.VgpuPgpuMetadata, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuMetadata") + ret0, _ := ret[0].(nvml.VgpuPgpuMetadata) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuMetadata indicates an expected call of GetVgpuMetadata. +func (mr *MockGPUDeviceMockRecorder) GetVgpuMetadata() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuMetadata", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuMetadata)) +} + +// GetVgpuProcessUtilization mocks base method. 
+func (m *MockGPUDevice) GetVgpuProcessUtilization(arg0 uint64) ([]nvml.VgpuProcessUtilizationSample, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuProcessUtilization", arg0) + ret0, _ := ret[0].([]nvml.VgpuProcessUtilizationSample) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuProcessUtilization indicates an expected call of GetVgpuProcessUtilization. +func (mr *MockGPUDeviceMockRecorder) GetVgpuProcessUtilization(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuProcessUtilization", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuProcessUtilization), arg0) +} + +// GetVgpuProcessesUtilizationInfo mocks base method. +func (m *MockGPUDevice) GetVgpuProcessesUtilizationInfo() (nvml.VgpuProcessesUtilizationInfo, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuProcessesUtilizationInfo") + ret0, _ := ret[0].(nvml.VgpuProcessesUtilizationInfo) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuProcessesUtilizationInfo indicates an expected call of GetVgpuProcessesUtilizationInfo. +func (mr *MockGPUDeviceMockRecorder) GetVgpuProcessesUtilizationInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuProcessesUtilizationInfo", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuProcessesUtilizationInfo)) +} + +// GetVgpuSchedulerCapabilities mocks base method. +func (m *MockGPUDevice) GetVgpuSchedulerCapabilities() (nvml.VgpuSchedulerCapabilities, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuSchedulerCapabilities") + ret0, _ := ret[0].(nvml.VgpuSchedulerCapabilities) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuSchedulerCapabilities indicates an expected call of GetVgpuSchedulerCapabilities. +func (mr *MockGPUDeviceMockRecorder) GetVgpuSchedulerCapabilities() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuSchedulerCapabilities", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuSchedulerCapabilities)) +} + +// GetVgpuSchedulerLog mocks base method. +func (m *MockGPUDevice) GetVgpuSchedulerLog() (nvml.VgpuSchedulerLog, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuSchedulerLog") + ret0, _ := ret[0].(nvml.VgpuSchedulerLog) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuSchedulerLog indicates an expected call of GetVgpuSchedulerLog. +func (mr *MockGPUDeviceMockRecorder) GetVgpuSchedulerLog() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuSchedulerLog", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuSchedulerLog)) +} + +// GetVgpuSchedulerState mocks base method. +func (m *MockGPUDevice) GetVgpuSchedulerState() (nvml.VgpuSchedulerGetState, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuSchedulerState") + ret0, _ := ret[0].(nvml.VgpuSchedulerGetState) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuSchedulerState indicates an expected call of GetVgpuSchedulerState. +func (mr *MockGPUDeviceMockRecorder) GetVgpuSchedulerState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuSchedulerState", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuSchedulerState)) +} + +// GetVgpuTypeCreatablePlacements mocks base method. 
+func (m *MockGPUDevice) GetVgpuTypeCreatablePlacements(arg0 nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuTypeCreatablePlacements", arg0) + ret0, _ := ret[0].(nvml.VgpuPlacementList) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuTypeCreatablePlacements indicates an expected call of GetVgpuTypeCreatablePlacements. +func (mr *MockGPUDeviceMockRecorder) GetVgpuTypeCreatablePlacements(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuTypeCreatablePlacements", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuTypeCreatablePlacements), arg0) +} + +// GetVgpuTypeSupportedPlacements mocks base method. +func (m *MockGPUDevice) GetVgpuTypeSupportedPlacements(arg0 nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuTypeSupportedPlacements", arg0) + ret0, _ := ret[0].(nvml.VgpuPlacementList) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVgpuTypeSupportedPlacements indicates an expected call of GetVgpuTypeSupportedPlacements. +func (mr *MockGPUDeviceMockRecorder) GetVgpuTypeSupportedPlacements(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuTypeSupportedPlacements", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuTypeSupportedPlacements), arg0) +} + +// GetVgpuUtilization mocks base method. +func (m *MockGPUDevice) GetVgpuUtilization(arg0 uint64) (nvml.ValueType, []nvml.VgpuInstanceUtilizationSample, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVgpuUtilization", arg0) + ret0, _ := ret[0].(nvml.ValueType) + ret1, _ := ret[1].([]nvml.VgpuInstanceUtilizationSample) + ret2, _ := ret[2].(nvml.Return) + return ret0, ret1, ret2 +} + +// GetVgpuUtilization indicates an expected call of GetVgpuUtilization. +func (mr *MockGPUDeviceMockRecorder) GetVgpuUtilization(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVgpuUtilization", reflect.TypeOf((*MockGPUDevice)(nil).GetVgpuUtilization), arg0) +} + +// GetViolationStatus mocks base method. +func (m *MockGPUDevice) GetViolationStatus(arg0 nvml.PerfPolicyType) (nvml.ViolationTime, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetViolationStatus", arg0) + ret0, _ := ret[0].(nvml.ViolationTime) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetViolationStatus indicates an expected call of GetViolationStatus. +func (mr *MockGPUDeviceMockRecorder) GetViolationStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetViolationStatus", reflect.TypeOf((*MockGPUDevice)(nil).GetViolationStatus), arg0) +} + +// GetVirtualizationMode mocks base method. +func (m *MockGPUDevice) GetVirtualizationMode() (nvml.GpuVirtualizationMode, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVirtualizationMode") + ret0, _ := ret[0].(nvml.GpuVirtualizationMode) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GetVirtualizationMode indicates an expected call of GetVirtualizationMode. +func (mr *MockGPUDeviceMockRecorder) GetVirtualizationMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualizationMode", reflect.TypeOf((*MockGPUDevice)(nil).GetVirtualizationMode)) +} + +// GpmMigSampleGet mocks base method. 
+func (m *MockGPUDevice) GpmMigSampleGet(arg0 int, arg1 nvml.GpmSample) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GpmMigSampleGet", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// GpmMigSampleGet indicates an expected call of GpmMigSampleGet. +func (mr *MockGPUDeviceMockRecorder) GpmMigSampleGet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GpmMigSampleGet", reflect.TypeOf((*MockGPUDevice)(nil).GpmMigSampleGet), arg0, arg1) +} + +// GpmQueryDeviceSupport mocks base method. +func (m *MockGPUDevice) GpmQueryDeviceSupport() (nvml.GpmSupport, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GpmQueryDeviceSupport") + ret0, _ := ret[0].(nvml.GpmSupport) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GpmQueryDeviceSupport indicates an expected call of GpmQueryDeviceSupport. +func (mr *MockGPUDeviceMockRecorder) GpmQueryDeviceSupport() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GpmQueryDeviceSupport", reflect.TypeOf((*MockGPUDevice)(nil).GpmQueryDeviceSupport)) +} + +// GpmQueryDeviceSupportV mocks base method. +func (m *MockGPUDevice) GpmQueryDeviceSupportV() nvml.GpmSupportV { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GpmQueryDeviceSupportV") + ret0, _ := ret[0].(nvml.GpmSupportV) + return ret0 +} + +// GpmQueryDeviceSupportV indicates an expected call of GpmQueryDeviceSupportV. +func (mr *MockGPUDeviceMockRecorder) GpmQueryDeviceSupportV() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GpmQueryDeviceSupportV", reflect.TypeOf((*MockGPUDevice)(nil).GpmQueryDeviceSupportV)) +} + +// GpmQueryIfStreamingEnabled mocks base method. +func (m *MockGPUDevice) GpmQueryIfStreamingEnabled() (uint32, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GpmQueryIfStreamingEnabled") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// GpmQueryIfStreamingEnabled indicates an expected call of GpmQueryIfStreamingEnabled. +func (mr *MockGPUDeviceMockRecorder) GpmQueryIfStreamingEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GpmQueryIfStreamingEnabled", reflect.TypeOf((*MockGPUDevice)(nil).GpmQueryIfStreamingEnabled)) +} + +// GpmSampleGet mocks base method. +func (m *MockGPUDevice) GpmSampleGet(arg0 nvml.GpmSample) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GpmSampleGet", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// GpmSampleGet indicates an expected call of GpmSampleGet. +func (mr *MockGPUDeviceMockRecorder) GpmSampleGet(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GpmSampleGet", reflect.TypeOf((*MockGPUDevice)(nil).GpmSampleGet), arg0) +} + +// GpmSetStreamingEnabled mocks base method. +func (m *MockGPUDevice) GpmSetStreamingEnabled(arg0 uint32) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GpmSetStreamingEnabled", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// GpmSetStreamingEnabled indicates an expected call of GpmSetStreamingEnabled. 
+func (mr *MockGPUDeviceMockRecorder) GpmSetStreamingEnabled(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GpmSetStreamingEnabled", reflect.TypeOf((*MockGPUDevice)(nil).GpmSetStreamingEnabled), arg0) +} + +// IsMigDeviceHandle mocks base method. +func (m *MockGPUDevice) IsMigDeviceHandle() (bool, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsMigDeviceHandle") + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// IsMigDeviceHandle indicates an expected call of IsMigDeviceHandle. +func (mr *MockGPUDeviceMockRecorder) IsMigDeviceHandle() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsMigDeviceHandle", reflect.TypeOf((*MockGPUDevice)(nil).IsMigDeviceHandle)) +} + +// OnSameBoard mocks base method. +func (m *MockGPUDevice) OnSameBoard(arg0 nvml.Device) (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OnSameBoard", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// OnSameBoard indicates an expected call of OnSameBoard. +func (mr *MockGPUDeviceMockRecorder) OnSameBoard(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnSameBoard", reflect.TypeOf((*MockGPUDevice)(nil).OnSameBoard), arg0) +} + +// RegisterEvents mocks base method. +func (m *MockGPUDevice) RegisterEvents(arg0 uint64, arg1 nvml.EventSet) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterEvents", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// RegisterEvents indicates an expected call of RegisterEvents. +func (mr *MockGPUDeviceMockRecorder) RegisterEvents(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterEvents", reflect.TypeOf((*MockGPUDevice)(nil).RegisterEvents), arg0, arg1) +} + +// ResetApplicationsClocks mocks base method. +func (m *MockGPUDevice) ResetApplicationsClocks() nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetApplicationsClocks") + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ResetApplicationsClocks indicates an expected call of ResetApplicationsClocks. +func (mr *MockGPUDeviceMockRecorder) ResetApplicationsClocks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetApplicationsClocks", reflect.TypeOf((*MockGPUDevice)(nil).ResetApplicationsClocks)) +} + +// ResetGpuLockedClocks mocks base method. +func (m *MockGPUDevice) ResetGpuLockedClocks() nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetGpuLockedClocks") + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ResetGpuLockedClocks indicates an expected call of ResetGpuLockedClocks. +func (mr *MockGPUDeviceMockRecorder) ResetGpuLockedClocks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetGpuLockedClocks", reflect.TypeOf((*MockGPUDevice)(nil).ResetGpuLockedClocks)) +} + +// ResetMemoryLockedClocks mocks base method. +func (m *MockGPUDevice) ResetMemoryLockedClocks() nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetMemoryLockedClocks") + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ResetMemoryLockedClocks indicates an expected call of ResetMemoryLockedClocks. 
+func (mr *MockGPUDeviceMockRecorder) ResetMemoryLockedClocks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetMemoryLockedClocks", reflect.TypeOf((*MockGPUDevice)(nil).ResetMemoryLockedClocks)) +} + +// ResetNvLinkErrorCounters mocks base method. +func (m *MockGPUDevice) ResetNvLinkErrorCounters(arg0 int) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetNvLinkErrorCounters", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ResetNvLinkErrorCounters indicates an expected call of ResetNvLinkErrorCounters. +func (mr *MockGPUDeviceMockRecorder) ResetNvLinkErrorCounters(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetNvLinkErrorCounters", reflect.TypeOf((*MockGPUDevice)(nil).ResetNvLinkErrorCounters), arg0) +} + +// ResetNvLinkUtilizationCounter mocks base method. +func (m *MockGPUDevice) ResetNvLinkUtilizationCounter(arg0, arg1 int) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetNvLinkUtilizationCounter", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ResetNvLinkUtilizationCounter indicates an expected call of ResetNvLinkUtilizationCounter. +func (mr *MockGPUDeviceMockRecorder) ResetNvLinkUtilizationCounter(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetNvLinkUtilizationCounter", reflect.TypeOf((*MockGPUDevice)(nil).ResetNvLinkUtilizationCounter), arg0, arg1) +} + +// SetAPIRestriction mocks base method. +func (m *MockGPUDevice) SetAPIRestriction(arg0 nvml.RestrictedAPI, arg1 nvml.EnableState) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetAPIRestriction", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetAPIRestriction indicates an expected call of SetAPIRestriction. +func (mr *MockGPUDeviceMockRecorder) SetAPIRestriction(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAPIRestriction", reflect.TypeOf((*MockGPUDevice)(nil).SetAPIRestriction), arg0, arg1) +} + +// SetAccountingMode mocks base method. +func (m *MockGPUDevice) SetAccountingMode(arg0 nvml.EnableState) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetAccountingMode", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetAccountingMode indicates an expected call of SetAccountingMode. +func (mr *MockGPUDeviceMockRecorder) SetAccountingMode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAccountingMode", reflect.TypeOf((*MockGPUDevice)(nil).SetAccountingMode), arg0) +} + +// SetApplicationsClocks mocks base method. +func (m *MockGPUDevice) SetApplicationsClocks(arg0, arg1 uint32) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetApplicationsClocks", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetApplicationsClocks indicates an expected call of SetApplicationsClocks. +func (mr *MockGPUDeviceMockRecorder) SetApplicationsClocks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetApplicationsClocks", reflect.TypeOf((*MockGPUDevice)(nil).SetApplicationsClocks), arg0, arg1) +} + +// SetAutoBoostedClocksEnabled mocks base method. 
+func (m *MockGPUDevice) SetAutoBoostedClocksEnabled(arg0 nvml.EnableState) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetAutoBoostedClocksEnabled", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetAutoBoostedClocksEnabled indicates an expected call of SetAutoBoostedClocksEnabled. +func (mr *MockGPUDeviceMockRecorder) SetAutoBoostedClocksEnabled(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAutoBoostedClocksEnabled", reflect.TypeOf((*MockGPUDevice)(nil).SetAutoBoostedClocksEnabled), arg0) +} + +// SetComputeMode mocks base method. +func (m *MockGPUDevice) SetComputeMode(arg0 nvml.ComputeMode) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetComputeMode", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetComputeMode indicates an expected call of SetComputeMode. +func (mr *MockGPUDeviceMockRecorder) SetComputeMode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetComputeMode", reflect.TypeOf((*MockGPUDevice)(nil).SetComputeMode), arg0) +} + +// SetConfComputeUnprotectedMemSize mocks base method. +func (m *MockGPUDevice) SetConfComputeUnprotectedMemSize(arg0 uint64) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetConfComputeUnprotectedMemSize", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetConfComputeUnprotectedMemSize indicates an expected call of SetConfComputeUnprotectedMemSize. +func (mr *MockGPUDeviceMockRecorder) SetConfComputeUnprotectedMemSize(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetConfComputeUnprotectedMemSize", reflect.TypeOf((*MockGPUDevice)(nil).SetConfComputeUnprotectedMemSize), arg0) +} + +// SetCpuAffinity mocks base method. +func (m *MockGPUDevice) SetCpuAffinity() nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetCpuAffinity") + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetCpuAffinity indicates an expected call of SetCpuAffinity. +func (mr *MockGPUDeviceMockRecorder) SetCpuAffinity() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCpuAffinity", reflect.TypeOf((*MockGPUDevice)(nil).SetCpuAffinity)) +} + +// SetDefaultAutoBoostedClocksEnabled mocks base method. +func (m *MockGPUDevice) SetDefaultAutoBoostedClocksEnabled(arg0 nvml.EnableState, arg1 uint32) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDefaultAutoBoostedClocksEnabled", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetDefaultAutoBoostedClocksEnabled indicates an expected call of SetDefaultAutoBoostedClocksEnabled. +func (mr *MockGPUDeviceMockRecorder) SetDefaultAutoBoostedClocksEnabled(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefaultAutoBoostedClocksEnabled", reflect.TypeOf((*MockGPUDevice)(nil).SetDefaultAutoBoostedClocksEnabled), arg0, arg1) +} + +// SetDefaultFanSpeed_v2 mocks base method. +func (m *MockGPUDevice) SetDefaultFanSpeed_v2(arg0 int) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDefaultFanSpeed_v2", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetDefaultFanSpeed_v2 indicates an expected call of SetDefaultFanSpeed_v2. 
+func (mr *MockGPUDeviceMockRecorder) SetDefaultFanSpeed_v2(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefaultFanSpeed_v2", reflect.TypeOf((*MockGPUDevice)(nil).SetDefaultFanSpeed_v2), arg0) +} + +// SetDriverModel mocks base method. +func (m *MockGPUDevice) SetDriverModel(arg0 nvml.DriverModel, arg1 uint32) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDriverModel", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetDriverModel indicates an expected call of SetDriverModel. +func (mr *MockGPUDeviceMockRecorder) SetDriverModel(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDriverModel", reflect.TypeOf((*MockGPUDevice)(nil).SetDriverModel), arg0, arg1) +} + +// SetEccMode mocks base method. +func (m *MockGPUDevice) SetEccMode(arg0 nvml.EnableState) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetEccMode", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetEccMode indicates an expected call of SetEccMode. +func (mr *MockGPUDeviceMockRecorder) SetEccMode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEccMode", reflect.TypeOf((*MockGPUDevice)(nil).SetEccMode), arg0) +} + +// SetFanControlPolicy mocks base method. +func (m *MockGPUDevice) SetFanControlPolicy(arg0 int, arg1 nvml.FanControlPolicy) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetFanControlPolicy", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetFanControlPolicy indicates an expected call of SetFanControlPolicy. +func (mr *MockGPUDeviceMockRecorder) SetFanControlPolicy(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFanControlPolicy", reflect.TypeOf((*MockGPUDevice)(nil).SetFanControlPolicy), arg0, arg1) +} + +// SetFanSpeed_v2 mocks base method. +func (m *MockGPUDevice) SetFanSpeed_v2(arg0, arg1 int) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetFanSpeed_v2", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetFanSpeed_v2 indicates an expected call of SetFanSpeed_v2. +func (mr *MockGPUDeviceMockRecorder) SetFanSpeed_v2(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFanSpeed_v2", reflect.TypeOf((*MockGPUDevice)(nil).SetFanSpeed_v2), arg0, arg1) +} + +// SetGpcClkVfOffset mocks base method. +func (m *MockGPUDevice) SetGpcClkVfOffset(arg0 int) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetGpcClkVfOffset", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetGpcClkVfOffset indicates an expected call of SetGpcClkVfOffset. +func (mr *MockGPUDeviceMockRecorder) SetGpcClkVfOffset(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetGpcClkVfOffset", reflect.TypeOf((*MockGPUDevice)(nil).SetGpcClkVfOffset), arg0) +} + +// SetGpuLockedClocks mocks base method. +func (m *MockGPUDevice) SetGpuLockedClocks(arg0, arg1 uint32) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetGpuLockedClocks", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetGpuLockedClocks indicates an expected call of SetGpuLockedClocks. 
+func (mr *MockGPUDeviceMockRecorder) SetGpuLockedClocks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetGpuLockedClocks", reflect.TypeOf((*MockGPUDevice)(nil).SetGpuLockedClocks), arg0, arg1) +} + +// SetGpuOperationMode mocks base method. +func (m *MockGPUDevice) SetGpuOperationMode(arg0 nvml.GpuOperationMode) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetGpuOperationMode", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetGpuOperationMode indicates an expected call of SetGpuOperationMode. +func (mr *MockGPUDeviceMockRecorder) SetGpuOperationMode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetGpuOperationMode", reflect.TypeOf((*MockGPUDevice)(nil).SetGpuOperationMode), arg0) +} + +// SetMemClkVfOffset mocks base method. +func (m *MockGPUDevice) SetMemClkVfOffset(arg0 int) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetMemClkVfOffset", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetMemClkVfOffset indicates an expected call of SetMemClkVfOffset. +func (mr *MockGPUDeviceMockRecorder) SetMemClkVfOffset(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMemClkVfOffset", reflect.TypeOf((*MockGPUDevice)(nil).SetMemClkVfOffset), arg0) +} + +// SetMemoryLockedClocks mocks base method. +func (m *MockGPUDevice) SetMemoryLockedClocks(arg0, arg1 uint32) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetMemoryLockedClocks", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetMemoryLockedClocks indicates an expected call of SetMemoryLockedClocks. +func (mr *MockGPUDeviceMockRecorder) SetMemoryLockedClocks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMemoryLockedClocks", reflect.TypeOf((*MockGPUDevice)(nil).SetMemoryLockedClocks), arg0, arg1) +} + +// SetMigMode mocks base method. +func (m *MockGPUDevice) SetMigMode(arg0 int) (nvml.Return, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetMigMode", arg0) + ret0, _ := ret[0].(nvml.Return) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// SetMigMode indicates an expected call of SetMigMode. +func (mr *MockGPUDeviceMockRecorder) SetMigMode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMigMode", reflect.TypeOf((*MockGPUDevice)(nil).SetMigMode), arg0) +} + +// SetNvLinkDeviceLowPowerThreshold mocks base method. +func (m *MockGPUDevice) SetNvLinkDeviceLowPowerThreshold(arg0 *nvml.NvLinkPowerThres) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNvLinkDeviceLowPowerThreshold", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetNvLinkDeviceLowPowerThreshold indicates an expected call of SetNvLinkDeviceLowPowerThreshold. +func (mr *MockGPUDeviceMockRecorder) SetNvLinkDeviceLowPowerThreshold(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNvLinkDeviceLowPowerThreshold", reflect.TypeOf((*MockGPUDevice)(nil).SetNvLinkDeviceLowPowerThreshold), arg0) +} + +// SetNvLinkUtilizationControl mocks base method. 
+func (m *MockGPUDevice) SetNvLinkUtilizationControl(arg0, arg1 int, arg2 *nvml.NvLinkUtilizationControl, arg3 bool) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNvLinkUtilizationControl", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetNvLinkUtilizationControl indicates an expected call of SetNvLinkUtilizationControl. +func (mr *MockGPUDeviceMockRecorder) SetNvLinkUtilizationControl(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNvLinkUtilizationControl", reflect.TypeOf((*MockGPUDevice)(nil).SetNvLinkUtilizationControl), arg0, arg1, arg2, arg3) +} + +// SetPersistenceMode mocks base method. +func (m *MockGPUDevice) SetPersistenceMode(arg0 nvml.EnableState) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPersistenceMode", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetPersistenceMode indicates an expected call of SetPersistenceMode. +func (mr *MockGPUDeviceMockRecorder) SetPersistenceMode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPersistenceMode", reflect.TypeOf((*MockGPUDevice)(nil).SetPersistenceMode), arg0) +} + +// SetPowerManagementLimit mocks base method. +func (m *MockGPUDevice) SetPowerManagementLimit(arg0 uint32) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPowerManagementLimit", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetPowerManagementLimit indicates an expected call of SetPowerManagementLimit. +func (mr *MockGPUDeviceMockRecorder) SetPowerManagementLimit(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPowerManagementLimit", reflect.TypeOf((*MockGPUDevice)(nil).SetPowerManagementLimit), arg0) +} + +// SetPowerManagementLimit_v2 mocks base method. +func (m *MockGPUDevice) SetPowerManagementLimit_v2(arg0 *nvml.PowerValue_v2) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPowerManagementLimit_v2", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetPowerManagementLimit_v2 indicates an expected call of SetPowerManagementLimit_v2. +func (mr *MockGPUDeviceMockRecorder) SetPowerManagementLimit_v2(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPowerManagementLimit_v2", reflect.TypeOf((*MockGPUDevice)(nil).SetPowerManagementLimit_v2), arg0) +} + +// SetTemperatureThreshold mocks base method. +func (m *MockGPUDevice) SetTemperatureThreshold(arg0 nvml.TemperatureThresholds, arg1 int) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetTemperatureThreshold", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetTemperatureThreshold indicates an expected call of SetTemperatureThreshold. +func (mr *MockGPUDeviceMockRecorder) SetTemperatureThreshold(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTemperatureThreshold", reflect.TypeOf((*MockGPUDevice)(nil).SetTemperatureThreshold), arg0, arg1) +} + +// SetVgpuCapabilities mocks base method. 
+func (m *MockGPUDevice) SetVgpuCapabilities(arg0 nvml.DeviceVgpuCapability, arg1 nvml.EnableState) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetVgpuCapabilities", arg0, arg1) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetVgpuCapabilities indicates an expected call of SetVgpuCapabilities. +func (mr *MockGPUDeviceMockRecorder) SetVgpuCapabilities(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetVgpuCapabilities", reflect.TypeOf((*MockGPUDevice)(nil).SetVgpuCapabilities), arg0, arg1) +} + +// SetVgpuHeterogeneousMode mocks base method. +func (m *MockGPUDevice) SetVgpuHeterogeneousMode(arg0 nvml.VgpuHeterogeneousMode) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetVgpuHeterogeneousMode", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetVgpuHeterogeneousMode indicates an expected call of SetVgpuHeterogeneousMode. +func (mr *MockGPUDeviceMockRecorder) SetVgpuHeterogeneousMode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetVgpuHeterogeneousMode", reflect.TypeOf((*MockGPUDevice)(nil).SetVgpuHeterogeneousMode), arg0) +} + +// SetVgpuSchedulerState mocks base method. +func (m *MockGPUDevice) SetVgpuSchedulerState(arg0 *nvml.VgpuSchedulerSetState) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetVgpuSchedulerState", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetVgpuSchedulerState indicates an expected call of SetVgpuSchedulerState. +func (mr *MockGPUDeviceMockRecorder) SetVgpuSchedulerState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetVgpuSchedulerState", reflect.TypeOf((*MockGPUDevice)(nil).SetVgpuSchedulerState), arg0) +} + +// SetVirtualizationMode mocks base method. +func (m *MockGPUDevice) SetVirtualizationMode(arg0 nvml.GpuVirtualizationMode) nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetVirtualizationMode", arg0) + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// SetVirtualizationMode indicates an expected call of SetVirtualizationMode. +func (mr *MockGPUDeviceMockRecorder) SetVirtualizationMode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetVirtualizationMode", reflect.TypeOf((*MockGPUDevice)(nil).SetVirtualizationMode), arg0) +} + +// ValidateInforom mocks base method. +func (m *MockGPUDevice) ValidateInforom() nvml.Return { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateInforom") + ret0, _ := ret[0].(nvml.Return) + return ret0 +} + +// ValidateInforom indicates an expected call of ValidateInforom. +func (mr *MockGPUDeviceMockRecorder) ValidateInforom() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateInforom", reflect.TypeOf((*MockGPUDevice)(nil).ValidateInforom)) +} + +// VgpuTypeGetMaxInstances mocks base method. +func (m *MockGPUDevice) VgpuTypeGetMaxInstances(arg0 nvml.VgpuTypeId) (int, nvml.Return) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VgpuTypeGetMaxInstances", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(nvml.Return) + return ret0, ret1 +} + +// VgpuTypeGetMaxInstances indicates an expected call of VgpuTypeGetMaxInstances. 
+func (mr *MockGPUDeviceMockRecorder) VgpuTypeGetMaxInstances(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VgpuTypeGetMaxInstances", reflect.TypeOf((*MockGPUDevice)(nil).VgpuTypeGetMaxInstances), arg0) +} diff --git a/ecs-init/gpu/nvidia_gpu_manager.go b/ecs-init/gpu/nvidia_gpu_manager.go index 8a426d9e4ff..671b7984c8f 100644 --- a/ecs-init/gpu/nvidia_gpu_manager.go +++ b/ecs-init/gpu/nvidia_gpu_manager.go @@ -18,7 +18,7 @@ import ( "os" "path/filepath" - "github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml" + "github.com/NVIDIA/go-nvml/pkg/nvml" "github.com/cihub/seelog" "github.com/pkg/errors" ) @@ -129,7 +129,11 @@ func (n *NvidiaGPUManager) Initialize() error { var InitializeNVML = InitNVML func InitNVML() error { - return nvml.Init() + ret := nvml.Init() + if ret != nvml.SUCCESS { + return errors.New(nvml.ErrorString(ret)) + } + return nil } // Shutdown is for shutting down nvidia's nvml library @@ -144,7 +148,11 @@ func (n *NvidiaGPUManager) Shutdown() error { var ShutdownNVML = ShutdownNVMLib func ShutdownNVMLib() error { - return nvml.Shutdown() + ret := nvml.Shutdown() + if ret != nvml.SUCCESS { + return errors.New(nvml.ErrorString(ret)) + } + return nil } // GetDriverVersion is for getting Nvidia driver version on the instance @@ -159,7 +167,11 @@ func (n *NvidiaGPUManager) GetDriverVersion() (string, error) { var NvmlGetDriverVersion = GetNvidiaDriverVersion func GetNvidiaDriverVersion() (string, error) { - return nvml.GetDriverVersion() + version, ret := nvml.SystemGetDriverVersion() + if ret != nvml.SUCCESS { + return "", errors.New(nvml.ErrorString(ret)) + } + return version, nil } // GetGPUDeviceIDs is for getting the GPU device UUIDs @@ -169,14 +181,18 @@ func (n *NvidiaGPUManager) GetGPUDeviceIDs() ([]string, error) { return nil, errors.Wrapf(err, "error getting GPU device count for UUID detection") } var gpuIDs []string - var i uint - for i = 0; i < count; i++ { - device, err := NvmlNewDeviceLite(i) - if err != nil { - seelog.Errorf("error initializing device of index %d: %v", i, err) + for i := 0; i < count; i++ { + device, ret := nvml.DeviceGetHandleByIndex(i) + if ret != nvml.SUCCESS { + seelog.Errorf("Error initializing device of index %d: %v", i, nvml.ErrorString(ret)) continue } - gpuIDs = append(gpuIDs, device.UUID) + uuid, ret := nvml.DeviceGetUUID(device) + if ret != nvml.SUCCESS { + seelog.Errorf("Failed to get UUID for device at index %d: %v", i, nvml.ErrorString(ret)) + continue + } + gpuIDs = append(gpuIDs, uuid) } if len(gpuIDs) == 0 { return gpuIDs, errors.New("error initializing GPU devices") @@ -187,15 +203,12 @@ func (n *NvidiaGPUManager) GetGPUDeviceIDs() ([]string, error) { var NvmlGetDeviceCount = GetDeviceCount // GetDeviceCount is for getting the number of GPU devices in the instance -func GetDeviceCount() (uint, error) { - return nvml.GetDeviceCount() -} - -var NvmlNewDeviceLite = NewDeviceLite - -// NewDeviceLite is for initializing a new GPU device -func NewDeviceLite(idx uint) (*nvml.Device, error) { - return nvml.NewDeviceLite(idx) +func GetDeviceCount() (int, error) { + count, ret := nvml.DeviceGetCount() + if ret != nvml.SUCCESS { + return 0, errors.New(nvml.ErrorString(ret)) + } + return count, nil } // SaveGPUState saves gpu state info on the disk diff --git a/ecs-init/gpu/nvidia_gpu_manager_test.go b/ecs-init/gpu/nvidia_gpu_manager_test.go index d376488cfcb..a20dc076212 100644 --- a/ecs-init/gpu/nvidia_gpu_manager_test.go +++ 
b/ecs-init/gpu/nvidia_gpu_manager_test.go @@ -16,10 +16,13 @@ package gpu import ( "errors" "os" - "reflect" "testing" - "github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml" + mock_gpu "github.com/aws/amazon-ecs-agent/ecs-init/gpu/mocks" + + "github.com/NVIDIA/go-nvml/pkg/nvml" + "github.com/golang/mock/gomock" + _ "github.com/golang/mock/mockgen/model" "github.com/stretchr/testify/assert" ) @@ -48,19 +51,19 @@ func TestNVMLInitializeError(t *testing.T) { } func TestDeviceCount(t *testing.T) { - NvmlGetDeviceCount = func() (uint, error) { + NvmlGetDeviceCount = func() (int, error) { return 1, nil } defer func() { NvmlGetDeviceCount = GetDeviceCount }() count, err := NvmlGetDeviceCount() - assert.Equal(t, uint(1), count) + assert.Equal(t, int(1), count) assert.NoError(t, err) } func TestDeviceCountError(t *testing.T) { - NvmlGetDeviceCount = func() (uint, error) { + NvmlGetDeviceCount = func() (int, error) { return 0, errors.New("device count error") } defer func() { @@ -70,85 +73,95 @@ func TestDeviceCountError(t *testing.T) { assert.Error(t, err) } -func TestNewDeviceLite(t *testing.T) { - model := "Tesla-k80" - NvmlNewDeviceLite = func(idx uint) (*nvml.Device, error) { - return &nvml.Device{ - UUID: "gpu-0123", - Model: &model, - }, nil - } - defer func() { - NvmlNewDeviceLite = NewDeviceLite - }() - device, err := NvmlNewDeviceLite(4) - assert.NoError(t, err) - assert.Equal(t, "gpu-0123", device.UUID) - assert.Equal(t, model, *device.Model) -} +func TestGetGPUDeviceIDs(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + nvidiaGPUManager := NewNvidiaGPUManager() -func TestNewDeviceLiteError(t *testing.T) { - NvmlNewDeviceLite = func(idx uint) (*nvml.Device, error) { - return nil, errors.New("device error") + // Mock NvmlGetDeviceCount + oldNvmlGetDeviceCount := NvmlGetDeviceCount + NvmlGetDeviceCount = func() (int, error) { + return 2, nil } defer func() { - NvmlNewDeviceLite = NewDeviceLite + NvmlGetDeviceCount = oldNvmlGetDeviceCount }() - device, err := NvmlNewDeviceLite(4) - assert.Error(t, err) - assert.Nil(t, device) -} -func TestGetGPUDeviceIDs(t *testing.T) { - nvidiaGPUManager := NewNvidiaGPUManager() - NvmlGetDeviceCount = func() (uint, error) { - return 2, nil - } - NvmlNewDeviceLite = func(idx uint) (*nvml.Device, error) { - var uuid string + // Mock DeviceGetHandleByIndex and DeviceGetUUID + oldDeviceGetHandleByIndex := nvml.DeviceGetHandleByIndex + oldDeviceGetUUID := nvml.DeviceGetUUID + + mockDevice1 := mock_gpu.NewMockGPUDevice(ctrl) + mockDevice2 := mock_gpu.NewMockGPUDevice(ctrl) + + nvml.DeviceGetHandleByIndex = func(idx int) (nvml.Device, nvml.Return) { if idx == 0 { - uuid = "gpu-0123" - } else { - uuid = "gpu-1234" + return mockDevice1, nvml.SUCCESS } - return &nvml.Device{ - UUID: uuid, - }, nil + return mockDevice2, nvml.SUCCESS } + + mockDevice1.EXPECT().GetUUID().Return("gpu-0123", nvml.SUCCESS) + mockDevice2.EXPECT().GetUUID().Return("gpu-1234", nvml.SUCCESS) + defer func() { - NvmlGetDeviceCount = GetDeviceCount - NvmlNewDeviceLite = NewDeviceLite + nvml.DeviceGetHandleByIndex = oldDeviceGetHandleByIndex + nvml.DeviceGetUUID = oldDeviceGetUUID }() + + // Call the function and assert gpuIDs, err := nvidiaGPUManager.GetGPUDeviceIDs() assert.NoError(t, err) - assert.True(t, reflect.DeepEqual([]string{"gpu-0123", "gpu-1234"}, gpuIDs)) + assert.Equal(t, []string{"gpu-0123", "gpu-1234"}, gpuIDs) } func TestGetGPUDeviceIDsCountError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + nvidiaGPUManager := 
NewNvidiaGPUManager() - NvmlGetDeviceCount = func() (uint, error) { + + // Mock NvmlGetDeviceCount + oldNvmlGetDeviceCount := NvmlGetDeviceCount + NvmlGetDeviceCount = func() (int, error) { return 0, errors.New("device count error") } defer func() { - NvmlGetDeviceCount = GetDeviceCount + NvmlGetDeviceCount = oldNvmlGetDeviceCount }() + + // Call the function and assert gpuIDs, err := nvidiaGPUManager.GetGPUDeviceIDs() assert.Error(t, err) assert.Empty(t, gpuIDs) } func TestGetGPUDeviceIDsDeviceError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + nvidiaGPUManager := NewNvidiaGPUManager() - NvmlGetDeviceCount = func() (uint, error) { + + // Mock NvmlGetDeviceCount + oldNvmlGetDeviceCount := NvmlGetDeviceCount + NvmlGetDeviceCount = func() (int, error) { return 1, nil } - NvmlNewDeviceLite = func(idx uint) (*nvml.Device, error) { - return nil, errors.New("device error") + defer func() { + NvmlGetDeviceCount = oldNvmlGetDeviceCount + }() + + // Mock DeviceGetHandleByIndex to return an error + oldDeviceGetHandleByIndex := nvml.DeviceGetHandleByIndex + nvml.DeviceGetHandleByIndex = func(int) (nvml.Device, nvml.Return) { + return nil, nvml.ERROR_UNKNOWN } defer func() { - NvmlGetDeviceCount = GetDeviceCount - NvmlNewDeviceLite = NewDeviceLite + nvml.DeviceGetHandleByIndex = oldDeviceGetHandleByIndex }() + + // Call the function and assert gpuIDs, err := nvidiaGPUManager.GetGPUDeviceIDs() assert.Error(t, err) assert.Empty(t, gpuIDs) @@ -279,50 +292,64 @@ func TestSetupNoGPU(t *testing.T) { } func TestGPUSetupSuccessful(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + driverVersion := "396.44" nvidiaGPUManager := NewNvidiaGPUManager() + MatchFilePattern = func(string) ([]string, error) { return []string{"/dev/nvidia0", "/dev/nvidia1"}, nil } + InitializeNVML = func() error { return nil } + NvmlGetDriverVersion = func() (string, error) { return driverVersion, nil } - NvmlGetDeviceCount = func() (uint, error) { + + NvmlGetDeviceCount = func() (int, error) { return 2, nil } - NvmlNewDeviceLite = func(idx uint) (*nvml.Device, error) { - var uuid string + + mockDevice1 := mock_gpu.NewMockGPUDevice(ctrl) + mockDevice2 := mock_gpu.NewMockGPUDevice(ctrl) + mockDevice1.EXPECT().GetUUID().Return("gpu-0123", nvml.SUCCESS) + mockDevice2.EXPECT().GetUUID().Return("gpu-1234", nvml.SUCCESS) + + // Mock DeviceGetHandleByIndex + oldDeviceGetHandleByIndex := nvml.DeviceGetHandleByIndex + nvml.DeviceGetHandleByIndex = func(idx int) (nvml.Device, nvml.Return) { if idx == 0 { - uuid = "gpu-0123" - } else { - uuid = "gpu-1234" + return mockDevice1, nvml.SUCCESS } - return &nvml.Device{ - UUID: uuid, - }, nil + return mockDevice2, nvml.SUCCESS } + WriteContentToFile = func(string, []byte, os.FileMode) error { return nil } + ShutdownNVML = func() error { return nil } + defer func() { MatchFilePattern = FilePatternMatch InitializeNVML = InitNVML NvmlGetDriverVersion = GetNvidiaDriverVersion NvmlGetDeviceCount = GetDeviceCount - NvmlNewDeviceLite = NewDeviceLite + nvml.DeviceGetHandleByIndex = oldDeviceGetHandleByIndex WriteContentToFile = WriteToFile ShutdownNVML = ShutdownNVMLib }() + err := nvidiaGPUManager.Setup() assert.NoError(t, err) assert.Equal(t, driverVersion, nvidiaGPUManager.(*NvidiaGPUManager).DriverVersion) - assert.True(t, reflect.DeepEqual([]string{"gpu-0123", "gpu-1234"}, nvidiaGPUManager.(*NvidiaGPUManager).GPUIDs)) + assert.Equal(t, []string{"gpu-0123", "gpu-1234"}, nvidiaGPUManager.(*NvidiaGPUManager).GPUIDs) } func TestSetupNVMLError(t *testing.T) { 
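For reviewers new to the go-nvml calling convention adopted above, here is a minimal stand-alone sketch (illustration only, not part of the patch) built from the same entry points the diff already uses — nvml.Init, nvml.DeviceGetCount, nvml.DeviceGetHandleByIndex, Device.GetUUID, and nvml.Shutdown. It shows the Return/ErrorString error-handling pattern that InitNVML, GetNvidiaDriverVersion, and GetGPUDeviceIDs now follow.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// listGPUs enumerates GPU UUIDs the same way the updated GetGPUDeviceIDs does:
// every go-nvml call returns an nvml.Return that is checked against
// nvml.SUCCESS and converted to a Go error with nvml.ErrorString.
func listGPUs() ([]string, error) {
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		return nil, errors.New(nvml.ErrorString(ret))
	}
	defer nvml.Shutdown() // Shutdown also returns an nvml.Return; ignored in this sketch.

	count, ret := nvml.DeviceGetCount()
	if ret != nvml.SUCCESS {
		return nil, errors.New(nvml.ErrorString(ret))
	}

	var uuids []string
	for i := 0; i < count; i++ {
		device, ret := nvml.DeviceGetHandleByIndex(i)
		if ret != nvml.SUCCESS {
			continue // skip devices that cannot be opened, as the manager does
		}
		uuid, ret := device.GetUUID()
		if ret != nvml.SUCCESS {
			continue
		}
		uuids = append(uuids, uuid)
	}
	return uuids, nil
}

func main() {
	uuids, err := listGPUs()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(uuids)
}

Unlike the old gpu-monitoring-tools bindings, go-nvml never returns a Go error directly, which is why the manager wraps failures with errors.New(nvml.ErrorString(ret)) and why the tests above stub devices through the generated MockGPUDevice rather than constructing *nvml.Device values.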
diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/LICENSE b/ecs-init/vendor/github.com/NVIDIA/go-nvml/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl.go new file mode 100644 index 00000000000..34948a722ad --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl.go @@ -0,0 +1,117 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dl + +import ( + "errors" + "fmt" + "runtime" + "unsafe" +) + +// #cgo LDFLAGS: -ldl +// #include <dlfcn.h> +// #include <stdlib.h> +import "C" + +const ( + RTLD_LAZY = C.RTLD_LAZY + RTLD_NOW = C.RTLD_NOW + RTLD_GLOBAL = C.RTLD_GLOBAL + RTLD_LOCAL = C.RTLD_LOCAL + RTLD_NODELETE = C.RTLD_NODELETE + RTLD_NOLOAD = C.RTLD_NOLOAD +) + +type DynamicLibrary struct { + Name string + Flags int + handle unsafe.Pointer +} + +func New(name string, flags int) *DynamicLibrary { + return &DynamicLibrary{ + Name: name, + Flags: flags, + handle: nil, + } +} + +func withOSLock(action func() error) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + return action() +} + +func dlError() error { + lastErr := C.dlerror() + if lastErr == nil { + return nil + } + return errors.New(C.GoString(lastErr)) +} + +func (dl *DynamicLibrary) Open() error { + name := C.CString(dl.Name) + defer C.free(unsafe.Pointer(name)) + + if err := withOSLock(func() error { + handle := C.dlopen(name, C.int(dl.Flags)) + if handle == nil { + return dlError() + } + dl.handle = handle + return nil + }); err != nil { + return err + } + return nil +} + +func (dl *DynamicLibrary) Close() error { + if dl.handle == nil { + return nil + } + if err := withOSLock(func() error { + if C.dlclose(dl.handle) != 0 { + return dlError() + } + dl.handle = nil + return nil + }); err != nil { + return err + } + return nil +} + +func (dl *DynamicLibrary) Lookup(symbol string) error { + sym := C.CString(symbol) + defer C.free(unsafe.Pointer(sym)) + + var pointer unsafe.Pointer + if err := withOSLock(func() error { + // Call dlError() to clear out any previous errors. + _ = dlError() + pointer = C.dlsym(dl.handle, sym) + if pointer == nil { + return fmt.Errorf("symbol %q not found: %w", symbol, dlError()) + } + return nil + }); err != nil { + return err + } + return nil +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl_linux.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl_linux.go new file mode 100644 index 00000000000..ae3acd07dd1 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl_linux.go @@ -0,0 +1,26 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package dl + +// #cgo LDFLAGS: -ldl +// #include <dlfcn.h> +// #include <stdlib.h> +import "C" + +const ( + RTLD_DEEPBIND = C.RTLD_DEEPBIND +) diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/api.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/api.go new file mode 100644 index 00000000000..fdf27bda71c --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/api.go @@ -0,0 +1,56 @@ +/** +# Copyright 2023 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package nvml + +// ExtendedInterface defines a set of extensions to the core NVML API. +// +// TODO: For now the list of methods in this interface need to be kept in sync +// with the list of excluded methods for the Interface type in +// gen/nvml/generateapi.go. In the future we should automate this. +// +//go:generate moq -out mock/extendedinterface.go -pkg mock . ExtendedInterface:ExtendedInterface +type ExtendedInterface interface { + LookupSymbol(string) error +} + +// libraryOptions hold the paramaters than can be set by a LibraryOption +type libraryOptions struct { + path string + flags int +} + +// LibraryOption represents a functional option to configure the underlying NVML library +type LibraryOption func(*libraryOptions) + +// WithLibraryPath provides an option to set the library name to be used by the NVML library. +func WithLibraryPath(path string) LibraryOption { + return func(o *libraryOptions) { + o.path = path + } +} + +// SetLibraryOptions applies the specified options to the NVML library. +// If this is called when a library is already loaded, an error is raised. +func SetLibraryOptions(opts ...LibraryOption) error { + libnvml.Lock() + defer libnvml.Unlock() + if libnvml.refcount != 0 { + return errLibraryAlreadyLoaded + } + libnvml.init(opts...) + return nil +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h new file mode 100644 index 00000000000..b25c5e5d09c --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h @@ -0,0 +1,23 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. +// Code generated by https://git.io/c-for-go. DO NOT EDIT. + +#include "nvml.h" +#include <stdlib.h> +#pragma once + +#define __CGOGEN 1 + diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers_static.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers_static.go new file mode 100644 index 00000000000..1f30eaae976 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers_static.go @@ -0,0 +1,75 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import ( + "unsafe" +) + +import "C" + +var cgoAllocsUnknown = new(struct{}) + +type stringHeader struct { + Data unsafe.Pointer + Len int +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +func uint32SliceToIntSlice(s []uint32) []int { + ret := make([]int, len(s)) + for i := range s { + ret[i] = int(s[i]) + } + return ret +} + +func convertSlice[T any, I any](input []T) []I { + output := make([]I, len(input)) + for i, obj := range input { + switch v := any(obj).(type) { + case I: + output[i] = v + } + } + return output +} + +// packPCharString creates a Go string backed by *C.char and avoids copying. +func packPCharString(p *C.char) (raw string) { + if p != nil && *p != 0 { + h := (*stringHeader)(unsafe.Pointer(&raw)) + h.Data = unsafe.Pointer(p) + for *p != 0 { + p = (*C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1)) // p++ + } + h.Len = int(uintptr(unsafe.Pointer(p)) - uintptr(h.Data)) + } + return +} + +// unpackPCharString represents the data from Go string as *C.char and avoids copying. +func unpackPCharString(str string) (*C.char, *struct{}) { + h := (*stringHeader)(unsafe.Pointer(&str)) + return (*C.char)(h.Data), cgoAllocsUnknown +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go new file mode 100644 index 00000000000..1ccb5016b8f --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go @@ -0,0 +1,1538 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. +// Code generated by https://git.io/c-for-go. DO NOT EDIT. + +package nvml + +/* +#cgo linux LDFLAGS: -Wl,--export-dynamic -Wl,--unresolved-symbols=ignore-in-object-files +#cgo darwin LDFLAGS: -Wl,-undefined,dynamic_lookup +#cgo CFLAGS: -DNVML_NO_UNVERSIONED_FUNC_DEFS=1 +#include "nvml.h" +#include <stdlib.h> +#include "cgo_helpers.h" +*/ +import "C" + +const ( + // NO_UNVERSIONED_FUNC_DEFS as defined in go-nvml/:24 + NO_UNVERSIONED_FUNC_DEFS = 1 + // API_VERSION as defined in nvml/nvml.h + API_VERSION = 12 + // API_VERSION_STR as defined in nvml/nvml.h + API_VERSION_STR = "12" + // VALUE_NOT_AVAILABLE as defined in nvml/nvml.h + VALUE_NOT_AVAILABLE = -1 + // DEVICE_PCI_BUS_ID_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_BUFFER_SIZE = 32 + // DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE = 16 + // DEVICE_PCI_BUS_ID_LEGACY_FMT as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_LEGACY_FMT = "%04X:%02X:%02X.0" + // DEVICE_PCI_BUS_ID_FMT as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_FMT = "%08X:%02X:%02X.0" + // NVLINK_MAX_LINKS as defined in nvml/nvml.h + NVLINK_MAX_LINKS = 18 + // TOPOLOGY_CPU as defined in nvml/nvml.h + TOPOLOGY_CPU = 0 + // MAX_PHYSICAL_BRIDGE as defined in nvml/nvml.h + MAX_PHYSICAL_BRIDGE = 128 + // MAX_THERMAL_SENSORS_PER_GPU as defined in nvml/nvml.h + MAX_THERMAL_SENSORS_PER_GPU = 3 + // FlagDefault as defined in nvml/nvml.h + FlagDefault = 0 + // FlagForce as defined in nvml/nvml.h + FlagForce = 1 + // SINGLE_BIT_ECC as defined in nvml/nvml.h + SINGLE_BIT_ECC = 0 + // DOUBLE_BIT_ECC as defined in nvml/nvml.h + DOUBLE_BIT_ECC = 0 + // MAX_GPU_PERF_PSTATES as defined in nvml/nvml.h + MAX_GPU_PERF_PSTATES = 16 + // GRID_LICENSE_EXPIRY_NOT_AVAILABLE as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_NOT_AVAILABLE = 0 + // GRID_LICENSE_EXPIRY_INVALID as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_INVALID = 1 + // GRID_LICENSE_EXPIRY_VALID as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_VALID = 2 + // GRID_LICENSE_EXPIRY_NOT_APPLICABLE as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_NOT_APPLICABLE = 3 + // GRID_LICENSE_EXPIRY_PERMANENT as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_PERMANENT = 4 + // GRID_LICENSE_BUFFER_SIZE as defined in nvml/nvml.h + GRID_LICENSE_BUFFER_SIZE = 128 + // VGPU_NAME_BUFFER_SIZE as defined in nvml/nvml.h + VGPU_NAME_BUFFER_SIZE = 64 + // GRID_LICENSE_FEATURE_MAX_COUNT as defined in nvml/nvml.h + GRID_LICENSE_FEATURE_MAX_COUNT = 3 + // INVALID_VGPU_PLACEMENT_ID as defined in nvml/nvml.h + INVALID_VGPU_PLACEMENT_ID = 65535 + // VGPU_SCHEDULER_POLICY_UNKNOWN as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_UNKNOWN = 0 + // VGPU_SCHEDULER_POLICY_BEST_EFFORT as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_BEST_EFFORT = 1 + // VGPU_SCHEDULER_POLICY_EQUAL_SHARE as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_EQUAL_SHARE = 2 + // VGPU_SCHEDULER_POLICY_FIXED_SHARE as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_FIXED_SHARE = 3 + // SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT as defined in nvml/nvml.h + 
SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT = 3 + // SCHEDULER_SW_MAX_LOG_ENTRIES as defined in nvml/nvml.h + SCHEDULER_SW_MAX_LOG_ENTRIES = 200 + // VGPU_SCHEDULER_ARR_DEFAULT as defined in nvml/nvml.h + VGPU_SCHEDULER_ARR_DEFAULT = 0 + // VGPU_SCHEDULER_ARR_DISABLE as defined in nvml/nvml.h + VGPU_SCHEDULER_ARR_DISABLE = 1 + // VGPU_SCHEDULER_ARR_ENABLE as defined in nvml/nvml.h + VGPU_SCHEDULER_ARR_ENABLE = 2 + // GRID_LICENSE_STATE_UNKNOWN as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNKNOWN = 0 + // GRID_LICENSE_STATE_UNINITIALIZED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNINITIALIZED = 1 + // GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED = 2 + // GRID_LICENSE_STATE_UNLICENSED_RESTRICTED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED_RESTRICTED = 3 + // GRID_LICENSE_STATE_UNLICENSED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED = 4 + // GRID_LICENSE_STATE_LICENSED as defined in nvml/nvml.h + GRID_LICENSE_STATE_LICENSED = 5 + // GSP_FIRMWARE_VERSION_BUF_SIZE as defined in nvml/nvml.h + GSP_FIRMWARE_VERSION_BUF_SIZE = 64 + // DEVICE_ARCH_KEPLER as defined in nvml/nvml.h + DEVICE_ARCH_KEPLER = 2 + // DEVICE_ARCH_MAXWELL as defined in nvml/nvml.h + DEVICE_ARCH_MAXWELL = 3 + // DEVICE_ARCH_PASCAL as defined in nvml/nvml.h + DEVICE_ARCH_PASCAL = 4 + // DEVICE_ARCH_VOLTA as defined in nvml/nvml.h + DEVICE_ARCH_VOLTA = 5 + // DEVICE_ARCH_TURING as defined in nvml/nvml.h + DEVICE_ARCH_TURING = 6 + // DEVICE_ARCH_AMPERE as defined in nvml/nvml.h + DEVICE_ARCH_AMPERE = 7 + // DEVICE_ARCH_ADA as defined in nvml/nvml.h + DEVICE_ARCH_ADA = 8 + // DEVICE_ARCH_HOPPER as defined in nvml/nvml.h + DEVICE_ARCH_HOPPER = 9 + // DEVICE_ARCH_UNKNOWN as defined in nvml/nvml.h + DEVICE_ARCH_UNKNOWN = 4294967295 + // BUS_TYPE_UNKNOWN as defined in nvml/nvml.h + BUS_TYPE_UNKNOWN = 0 + // BUS_TYPE_PCI as defined in nvml/nvml.h + BUS_TYPE_PCI = 1 + // BUS_TYPE_PCIE as defined in nvml/nvml.h + BUS_TYPE_PCIE = 2 + // BUS_TYPE_FPCI as defined in nvml/nvml.h + BUS_TYPE_FPCI = 3 + // BUS_TYPE_AGP as defined in nvml/nvml.h + BUS_TYPE_AGP = 4 + // FAN_POLICY_TEMPERATURE_CONTINOUS_SW as defined in nvml/nvml.h + FAN_POLICY_TEMPERATURE_CONTINOUS_SW = 0 + // FAN_POLICY_MANUAL as defined in nvml/nvml.h + FAN_POLICY_MANUAL = 1 + // POWER_SOURCE_AC as defined in nvml/nvml.h + POWER_SOURCE_AC = 0 + // POWER_SOURCE_BATTERY as defined in nvml/nvml.h + POWER_SOURCE_BATTERY = 1 + // POWER_SOURCE_UNDERSIZED as defined in nvml/nvml.h + POWER_SOURCE_UNDERSIZED = 2 + // PCIE_LINK_MAX_SPEED_INVALID as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_INVALID = 0 + // PCIE_LINK_MAX_SPEED_2500MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_2500MBPS = 1 + // PCIE_LINK_MAX_SPEED_5000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_5000MBPS = 2 + // PCIE_LINK_MAX_SPEED_8000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_8000MBPS = 3 + // PCIE_LINK_MAX_SPEED_16000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_16000MBPS = 4 + // PCIE_LINK_MAX_SPEED_32000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_32000MBPS = 5 + // PCIE_LINK_MAX_SPEED_64000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_64000MBPS = 6 + // ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED as defined in nvml/nvml.h + ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED = 0 + // ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED as defined in nvml/nvml.h + ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED = 1 + // MAX_GPU_UTILIZATIONS as defined in nvml/nvml.h + MAX_GPU_UTILIZATIONS = 8 + // 
FI_DEV_ECC_CURRENT as defined in nvml/nvml.h + FI_DEV_ECC_CURRENT = 1 + // FI_DEV_ECC_PENDING as defined in nvml/nvml.h + FI_DEV_ECC_PENDING = 2 + // FI_DEV_ECC_SBE_VOL_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_TOTAL = 3 + // FI_DEV_ECC_DBE_VOL_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_TOTAL = 4 + // FI_DEV_ECC_SBE_AGG_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_TOTAL = 5 + // FI_DEV_ECC_DBE_AGG_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_TOTAL = 6 + // FI_DEV_ECC_SBE_VOL_L1 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_L1 = 7 + // FI_DEV_ECC_DBE_VOL_L1 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_L1 = 8 + // FI_DEV_ECC_SBE_VOL_L2 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_L2 = 9 + // FI_DEV_ECC_DBE_VOL_L2 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_L2 = 10 + // FI_DEV_ECC_SBE_VOL_DEV as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_DEV = 11 + // FI_DEV_ECC_DBE_VOL_DEV as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_DEV = 12 + // FI_DEV_ECC_SBE_VOL_REG as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_REG = 13 + // FI_DEV_ECC_DBE_VOL_REG as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_REG = 14 + // FI_DEV_ECC_SBE_VOL_TEX as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_TEX = 15 + // FI_DEV_ECC_DBE_VOL_TEX as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_TEX = 16 + // FI_DEV_ECC_DBE_VOL_CBU as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_CBU = 17 + // FI_DEV_ECC_SBE_AGG_L1 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_L1 = 18 + // FI_DEV_ECC_DBE_AGG_L1 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_L1 = 19 + // FI_DEV_ECC_SBE_AGG_L2 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_L2 = 20 + // FI_DEV_ECC_DBE_AGG_L2 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_L2 = 21 + // FI_DEV_ECC_SBE_AGG_DEV as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_DEV = 22 + // FI_DEV_ECC_DBE_AGG_DEV as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_DEV = 23 + // FI_DEV_ECC_SBE_AGG_REG as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_REG = 24 + // FI_DEV_ECC_DBE_AGG_REG as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_REG = 25 + // FI_DEV_ECC_SBE_AGG_TEX as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_TEX = 26 + // FI_DEV_ECC_DBE_AGG_TEX as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_TEX = 27 + // FI_DEV_ECC_DBE_AGG_CBU as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_CBU = 28 + // FI_DEV_RETIRED_SBE as defined in nvml/nvml.h + FI_DEV_RETIRED_SBE = 29 + // FI_DEV_RETIRED_DBE as defined in nvml/nvml.h + FI_DEV_RETIRED_DBE = 30 + // FI_DEV_RETIRED_PENDING as defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING = 31 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 = 32 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 = 33 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 = 34 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 = 35 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 = 36 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 = 37 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL = 38 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 = 39 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 as defined in nvml/nvml.h + 
FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 = 40 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 = 41 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 = 42 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 = 43 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 = 44 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL = 45 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 = 46 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 = 47 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 = 48 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 = 49 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 = 50 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 = 51 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL = 52 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 = 53 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 = 54 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 = 55 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 = 56 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 = 57 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 = 58 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL = 59 + // FI_DEV_NVLINK_BANDWIDTH_C0_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L0 = 60 + // FI_DEV_NVLINK_BANDWIDTH_C0_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L1 = 61 + // FI_DEV_NVLINK_BANDWIDTH_C0_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L2 = 62 + // FI_DEV_NVLINK_BANDWIDTH_C0_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L3 = 63 + // FI_DEV_NVLINK_BANDWIDTH_C0_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L4 = 64 + // FI_DEV_NVLINK_BANDWIDTH_C0_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L5 = 65 + // FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL = 66 + // FI_DEV_NVLINK_BANDWIDTH_C1_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L0 = 67 + // FI_DEV_NVLINK_BANDWIDTH_C1_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L1 = 68 + // FI_DEV_NVLINK_BANDWIDTH_C1_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L2 = 69 + // FI_DEV_NVLINK_BANDWIDTH_C1_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L3 = 70 + // FI_DEV_NVLINK_BANDWIDTH_C1_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L4 = 71 + // FI_DEV_NVLINK_BANDWIDTH_C1_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L5 = 72 + // FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL as defined in nvml/nvml.h + 
FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL = 73 + // FI_DEV_PERF_POLICY_POWER as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_POWER = 74 + // FI_DEV_PERF_POLICY_THERMAL as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_THERMAL = 75 + // FI_DEV_PERF_POLICY_SYNC_BOOST as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_SYNC_BOOST = 76 + // FI_DEV_PERF_POLICY_BOARD_LIMIT as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_BOARD_LIMIT = 77 + // FI_DEV_PERF_POLICY_LOW_UTILIZATION as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_LOW_UTILIZATION = 78 + // FI_DEV_PERF_POLICY_RELIABILITY as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_RELIABILITY = 79 + // FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS = 80 + // FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS = 81 + // FI_DEV_MEMORY_TEMP as defined in nvml/nvml.h + FI_DEV_MEMORY_TEMP = 82 + // FI_DEV_TOTAL_ENERGY_CONSUMPTION as defined in nvml/nvml.h + FI_DEV_TOTAL_ENERGY_CONSUMPTION = 83 + // FI_DEV_NVLINK_SPEED_MBPS_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L0 = 84 + // FI_DEV_NVLINK_SPEED_MBPS_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L1 = 85 + // FI_DEV_NVLINK_SPEED_MBPS_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L2 = 86 + // FI_DEV_NVLINK_SPEED_MBPS_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L3 = 87 + // FI_DEV_NVLINK_SPEED_MBPS_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L4 = 88 + // FI_DEV_NVLINK_SPEED_MBPS_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L5 = 89 + // FI_DEV_NVLINK_SPEED_MBPS_COMMON as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_COMMON = 90 + // FI_DEV_NVLINK_LINK_COUNT as defined in nvml/nvml.h + FI_DEV_NVLINK_LINK_COUNT = 91 + // FI_DEV_RETIRED_PENDING_SBE as defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING_SBE = 92 + // FI_DEV_RETIRED_PENDING_DBE as defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING_DBE = 93 + // FI_DEV_PCIE_REPLAY_COUNTER as defined in nvml/nvml.h + FI_DEV_PCIE_REPLAY_COUNTER = 94 + // FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER as defined in nvml/nvml.h + FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER = 95 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 = 96 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 = 97 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 = 98 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 = 99 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 = 100 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 = 101 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 = 102 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 = 103 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 = 104 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 = 105 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 = 106 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 = 107 + // 
FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 = 108 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 = 109 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 = 110 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 = 111 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 = 112 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 = 113 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 = 114 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 = 115 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 = 116 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 = 117 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 = 118 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 = 119 + // FI_DEV_NVLINK_BANDWIDTH_C0_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L6 = 120 + // FI_DEV_NVLINK_BANDWIDTH_C0_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L7 = 121 + // FI_DEV_NVLINK_BANDWIDTH_C0_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L8 = 122 + // FI_DEV_NVLINK_BANDWIDTH_C0_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L9 = 123 + // FI_DEV_NVLINK_BANDWIDTH_C0_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L10 = 124 + // FI_DEV_NVLINK_BANDWIDTH_C0_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L11 = 125 + // FI_DEV_NVLINK_BANDWIDTH_C1_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L6 = 126 + // FI_DEV_NVLINK_BANDWIDTH_C1_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L7 = 127 + // FI_DEV_NVLINK_BANDWIDTH_C1_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L8 = 128 + // FI_DEV_NVLINK_BANDWIDTH_C1_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L9 = 129 + // FI_DEV_NVLINK_BANDWIDTH_C1_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L10 = 130 + // FI_DEV_NVLINK_BANDWIDTH_C1_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L11 = 131 + // FI_DEV_NVLINK_SPEED_MBPS_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L6 = 132 + // FI_DEV_NVLINK_SPEED_MBPS_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L7 = 133 + // FI_DEV_NVLINK_SPEED_MBPS_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L8 = 134 + // FI_DEV_NVLINK_SPEED_MBPS_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L9 = 135 + // FI_DEV_NVLINK_SPEED_MBPS_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L10 = 136 + // FI_DEV_NVLINK_SPEED_MBPS_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L11 = 137 + // FI_DEV_NVLINK_THROUGHPUT_DATA_TX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_DATA_TX = 138 + // FI_DEV_NVLINK_THROUGHPUT_DATA_RX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_DATA_RX = 139 + // FI_DEV_NVLINK_THROUGHPUT_RAW_TX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_RAW_TX = 140 + // FI_DEV_NVLINK_THROUGHPUT_RAW_RX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_RAW_RX = 141 + // 
FI_DEV_REMAPPED_COR as defined in nvml/nvml.h + FI_DEV_REMAPPED_COR = 142 + // FI_DEV_REMAPPED_UNC as defined in nvml/nvml.h + FI_DEV_REMAPPED_UNC = 143 + // FI_DEV_REMAPPED_PENDING as defined in nvml/nvml.h + FI_DEV_REMAPPED_PENDING = 144 + // FI_DEV_REMAPPED_FAILURE as defined in nvml/nvml.h + FI_DEV_REMAPPED_FAILURE = 145 + // FI_DEV_NVLINK_REMOTE_NVLINK_ID as defined in nvml/nvml.h + FI_DEV_NVLINK_REMOTE_NVLINK_ID = 146 + // FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT as defined in nvml/nvml.h + FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT = 147 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 = 148 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 = 149 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 = 150 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 = 151 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 = 152 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 = 153 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 = 154 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 = 155 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 = 156 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 = 157 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 = 158 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 = 159 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL = 160 + // FI_DEV_NVLINK_ERROR_DL_REPLAY as defined in nvml/nvml.h + FI_DEV_NVLINK_ERROR_DL_REPLAY = 161 + // FI_DEV_NVLINK_ERROR_DL_RECOVERY as defined in nvml/nvml.h + FI_DEV_NVLINK_ERROR_DL_RECOVERY = 162 + // FI_DEV_NVLINK_ERROR_DL_CRC as defined in nvml/nvml.h + FI_DEV_NVLINK_ERROR_DL_CRC = 163 + // FI_DEV_NVLINK_GET_SPEED as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_SPEED = 164 + // FI_DEV_NVLINK_GET_STATE as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_STATE = 165 + // FI_DEV_NVLINK_GET_VERSION as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_VERSION = 166 + // FI_DEV_NVLINK_GET_POWER_STATE as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_POWER_STATE = 167 + // FI_DEV_NVLINK_GET_POWER_THRESHOLD as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_POWER_THRESHOLD = 168 + // FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER as defined in nvml/nvml.h + FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER = 169 + // FI_DEV_C2C_LINK_COUNT as defined in nvml/nvml.h + FI_DEV_C2C_LINK_COUNT = 170 + // FI_DEV_C2C_LINK_GET_STATUS as defined in nvml/nvml.h + FI_DEV_C2C_LINK_GET_STATUS = 171 + // FI_DEV_C2C_LINK_GET_MAX_BW as defined in nvml/nvml.h + FI_DEV_C2C_LINK_GET_MAX_BW = 172 + // FI_DEV_PCIE_COUNT_CORRECTABLE_ERRORS as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_CORRECTABLE_ERRORS = 173 + // FI_DEV_PCIE_COUNT_NAKS_RECEIVED as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_NAKS_RECEIVED = 174 + // FI_DEV_PCIE_COUNT_RECEIVER_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_RECEIVER_ERROR = 175 + // FI_DEV_PCIE_COUNT_BAD_TLP as defined 
in nvml/nvml.h + FI_DEV_PCIE_COUNT_BAD_TLP = 176 + // FI_DEV_PCIE_COUNT_NAKS_SENT as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_NAKS_SENT = 177 + // FI_DEV_PCIE_COUNT_BAD_DLLP as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_BAD_DLLP = 178 + // FI_DEV_PCIE_COUNT_NON_FATAL_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_NON_FATAL_ERROR = 179 + // FI_DEV_PCIE_COUNT_FATAL_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_FATAL_ERROR = 180 + // FI_DEV_PCIE_COUNT_UNSUPPORTED_REQ as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_UNSUPPORTED_REQ = 181 + // FI_DEV_PCIE_COUNT_LCRC_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_LCRC_ERROR = 182 + // FI_DEV_PCIE_COUNT_LANE_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_LANE_ERROR = 183 + // FI_DEV_IS_RESETLESS_MIG_SUPPORTED as defined in nvml/nvml.h + FI_DEV_IS_RESETLESS_MIG_SUPPORTED = 184 + // FI_DEV_POWER_AVERAGE as defined in nvml/nvml.h + FI_DEV_POWER_AVERAGE = 185 + // FI_DEV_POWER_INSTANT as defined in nvml/nvml.h + FI_DEV_POWER_INSTANT = 186 + // FI_DEV_POWER_MIN_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_MIN_LIMIT = 187 + // FI_DEV_POWER_MAX_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_MAX_LIMIT = 188 + // FI_DEV_POWER_DEFAULT_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_DEFAULT_LIMIT = 189 + // FI_DEV_POWER_CURRENT_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_CURRENT_LIMIT = 190 + // FI_DEV_ENERGY as defined in nvml/nvml.h + FI_DEV_ENERGY = 191 + // FI_DEV_POWER_REQUESTED_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_REQUESTED_LIMIT = 192 + // FI_DEV_TEMPERATURE_SHUTDOWN_TLIMIT as defined in nvml/nvml.h + FI_DEV_TEMPERATURE_SHUTDOWN_TLIMIT = 193 + // FI_DEV_TEMPERATURE_SLOWDOWN_TLIMIT as defined in nvml/nvml.h + FI_DEV_TEMPERATURE_SLOWDOWN_TLIMIT = 194 + // FI_DEV_TEMPERATURE_MEM_MAX_TLIMIT as defined in nvml/nvml.h + FI_DEV_TEMPERATURE_MEM_MAX_TLIMIT = 195 + // FI_DEV_TEMPERATURE_GPU_MAX_TLIMIT as defined in nvml/nvml.h + FI_DEV_TEMPERATURE_GPU_MAX_TLIMIT = 196 + // FI_DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE as defined in nvml/nvml.h + FI_DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE = 199 + // FI_MAX as defined in nvml/nvml.h + FI_MAX = 200 + // EventTypeSingleBitEccError as defined in nvml/nvml.h + EventTypeSingleBitEccError = 1 + // EventTypeDoubleBitEccError as defined in nvml/nvml.h + EventTypeDoubleBitEccError = 2 + // EventTypePState as defined in nvml/nvml.h + EventTypePState = 4 + // EventTypeXidCriticalError as defined in nvml/nvml.h + EventTypeXidCriticalError = 8 + // EventTypeClock as defined in nvml/nvml.h + EventTypeClock = 16 + // EventTypePowerSourceChange as defined in nvml/nvml.h + EventTypePowerSourceChange = 128 + // EventMigConfigChange as defined in nvml/nvml.h + EventMigConfigChange = 256 + // EventTypeNone as defined in nvml/nvml.h + EventTypeNone = 0 + // EventTypeAll as defined in nvml/nvml.h + EventTypeAll = 415 + // ClocksEventReasonGpuIdle as defined in nvml/nvml.h + ClocksEventReasonGpuIdle = 1 + // ClocksEventReasonApplicationsClocksSetting as defined in nvml/nvml.h + ClocksEventReasonApplicationsClocksSetting = 2 + // ClocksThrottleReasonUserDefinedClocks as defined in nvml/nvml.h + ClocksThrottleReasonUserDefinedClocks = 2 + // ClocksEventReasonSwPowerCap as defined in nvml/nvml.h + ClocksEventReasonSwPowerCap = 4 + // ClocksThrottleReasonHwSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonHwSlowdown = 8 + // ClocksEventReasonSyncBoost as defined in nvml/nvml.h + ClocksEventReasonSyncBoost = 16 + // ClocksEventReasonSwThermalSlowdown as defined in nvml/nvml.h + 
ClocksEventReasonSwThermalSlowdown = 32 + // ClocksThrottleReasonHwThermalSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonHwThermalSlowdown = 64 + // ClocksThrottleReasonHwPowerBrakeSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonHwPowerBrakeSlowdown = 128 + // ClocksEventReasonDisplayClockSetting as defined in nvml/nvml.h + ClocksEventReasonDisplayClockSetting = 256 + // ClocksEventReasonNone as defined in nvml/nvml.h + ClocksEventReasonNone = 0 + // ClocksEventReasonAll as defined in nvml/nvml.h + ClocksEventReasonAll = 511 + // ClocksThrottleReasonGpuIdle as defined in nvml/nvml.h + ClocksThrottleReasonGpuIdle = 1 + // ClocksThrottleReasonApplicationsClocksSetting as defined in nvml/nvml.h + ClocksThrottleReasonApplicationsClocksSetting = 2 + // ClocksThrottleReasonSyncBoost as defined in nvml/nvml.h + ClocksThrottleReasonSyncBoost = 16 + // ClocksThrottleReasonSwPowerCap as defined in nvml/nvml.h + ClocksThrottleReasonSwPowerCap = 4 + // ClocksThrottleReasonSwThermalSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonSwThermalSlowdown = 32 + // ClocksThrottleReasonDisplayClockSetting as defined in nvml/nvml.h + ClocksThrottleReasonDisplayClockSetting = 256 + // ClocksThrottleReasonNone as defined in nvml/nvml.h + ClocksThrottleReasonNone = 0 + // ClocksThrottleReasonAll as defined in nvml/nvml.h + ClocksThrottleReasonAll = 511 + // NVFBC_SESSION_FLAG_DIFFMAP_ENABLED as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_DIFFMAP_ENABLED = 1 + // NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED = 2 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT = 4 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE = 8 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT = 16 + // CC_SYSTEM_CPU_CAPS_NONE as defined in nvml/nvml.h + CC_SYSTEM_CPU_CAPS_NONE = 0 + // CC_SYSTEM_CPU_CAPS_AMD_SEV as defined in nvml/nvml.h + CC_SYSTEM_CPU_CAPS_AMD_SEV = 1 + // CC_SYSTEM_CPU_CAPS_INTEL_TDX as defined in nvml/nvml.h + CC_SYSTEM_CPU_CAPS_INTEL_TDX = 2 + // CC_SYSTEM_GPUS_CC_NOT_CAPABLE as defined in nvml/nvml.h + CC_SYSTEM_GPUS_CC_NOT_CAPABLE = 0 + // CC_SYSTEM_GPUS_CC_CAPABLE as defined in nvml/nvml.h + CC_SYSTEM_GPUS_CC_CAPABLE = 1 + // CC_SYSTEM_DEVTOOLS_MODE_OFF as defined in nvml/nvml.h + CC_SYSTEM_DEVTOOLS_MODE_OFF = 0 + // CC_SYSTEM_DEVTOOLS_MODE_ON as defined in nvml/nvml.h + CC_SYSTEM_DEVTOOLS_MODE_ON = 1 + // CC_SYSTEM_ENVIRONMENT_UNAVAILABLE as defined in nvml/nvml.h + CC_SYSTEM_ENVIRONMENT_UNAVAILABLE = 0 + // CC_SYSTEM_ENVIRONMENT_SIM as defined in nvml/nvml.h + CC_SYSTEM_ENVIRONMENT_SIM = 1 + // CC_SYSTEM_ENVIRONMENT_PROD as defined in nvml/nvml.h + CC_SYSTEM_ENVIRONMENT_PROD = 2 + // CC_SYSTEM_FEATURE_DISABLED as defined in nvml/nvml.h + CC_SYSTEM_FEATURE_DISABLED = 0 + // CC_SYSTEM_FEATURE_ENABLED as defined in nvml/nvml.h + CC_SYSTEM_FEATURE_ENABLED = 1 + // CC_SYSTEM_MULTIGPU_NONE as defined in nvml/nvml.h + CC_SYSTEM_MULTIGPU_NONE = 0 + // CC_SYSTEM_MULTIGPU_PROTECTED_PCIE as defined in nvml/nvml.h + CC_SYSTEM_MULTIGPU_PROTECTED_PCIE = 1 + // CC_ACCEPTING_CLIENT_REQUESTS_FALSE as defined in nvml/nvml.h + CC_ACCEPTING_CLIENT_REQUESTS_FALSE = 0 + // CC_ACCEPTING_CLIENT_REQUESTS_TRUE as defined in nvml/nvml.h + CC_ACCEPTING_CLIENT_REQUESTS_TRUE = 1 + // GPU_CERT_CHAIN_SIZE as defined in nvml/nvml.h + 
GPU_CERT_CHAIN_SIZE = 4096 + // GPU_ATTESTATION_CERT_CHAIN_SIZE as defined in nvml/nvml.h + GPU_ATTESTATION_CERT_CHAIN_SIZE = 5120 + // CC_GPU_CEC_NONCE_SIZE as defined in nvml/nvml.h + CC_GPU_CEC_NONCE_SIZE = 32 + // CC_GPU_ATTESTATION_REPORT_SIZE as defined in nvml/nvml.h + CC_GPU_ATTESTATION_REPORT_SIZE = 8192 + // CC_GPU_CEC_ATTESTATION_REPORT_SIZE as defined in nvml/nvml.h + CC_GPU_CEC_ATTESTATION_REPORT_SIZE = 4096 + // CC_CEC_ATTESTATION_REPORT_NOT_PRESENT as defined in nvml/nvml.h + CC_CEC_ATTESTATION_REPORT_NOT_PRESENT = 0 + // CC_CEC_ATTESTATION_REPORT_PRESENT as defined in nvml/nvml.h + CC_CEC_ATTESTATION_REPORT_PRESENT = 1 + // CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN as defined in nvml/nvml.h + CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN = 50 + // CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX as defined in nvml/nvml.h + CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX = 75 + // GPU_FABRIC_UUID_LEN as defined in nvml/nvml.h + GPU_FABRIC_UUID_LEN = 16 + // GPU_FABRIC_STATE_NOT_SUPPORTED as defined in nvml/nvml.h + GPU_FABRIC_STATE_NOT_SUPPORTED = 0 + // GPU_FABRIC_STATE_NOT_STARTED as defined in nvml/nvml.h + GPU_FABRIC_STATE_NOT_STARTED = 1 + // GPU_FABRIC_STATE_IN_PROGRESS as defined in nvml/nvml.h + GPU_FABRIC_STATE_IN_PROGRESS = 2 + // GPU_FABRIC_STATE_COMPLETED as defined in nvml/nvml.h + GPU_FABRIC_STATE_COMPLETED = 3 + // GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED = 0 + // GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_TRUE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_TRUE = 1 + // GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_FALSE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_FALSE = 2 + // GPU_FABRIC_HEALTH_MASK_SHIFT_DEGRADED_BW as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_SHIFT_DEGRADED_BW = 0 + // GPU_FABRIC_HEALTH_MASK_WIDTH_DEGRADED_BW as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_WIDTH_DEGRADED_BW = 17 + // POWER_SCOPE_GPU as defined in nvml/nvml.h + POWER_SCOPE_GPU = 0 + // POWER_SCOPE_MODULE as defined in nvml/nvml.h + POWER_SCOPE_MODULE = 1 + // POWER_SCOPE_MEMORY as defined in nvml/nvml.h + POWER_SCOPE_MEMORY = 2 + // INIT_FLAG_NO_GPUS as defined in nvml/nvml.h + INIT_FLAG_NO_GPUS = 1 + // INIT_FLAG_NO_ATTACH as defined in nvml/nvml.h + INIT_FLAG_NO_ATTACH = 2 + // DEVICE_INFOROM_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_INFOROM_VERSION_BUFFER_SIZE = 16 + // DEVICE_UUID_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_UUID_BUFFER_SIZE = 80 + // DEVICE_UUID_V2_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_UUID_V2_BUFFER_SIZE = 96 + // DEVICE_PART_NUMBER_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_PART_NUMBER_BUFFER_SIZE = 80 + // SYSTEM_DRIVER_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + SYSTEM_DRIVER_VERSION_BUFFER_SIZE = 80 + // SYSTEM_NVML_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + SYSTEM_NVML_VERSION_BUFFER_SIZE = 80 + // DEVICE_NAME_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_NAME_BUFFER_SIZE = 64 + // DEVICE_NAME_V2_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_NAME_V2_BUFFER_SIZE = 96 + // DEVICE_SERIAL_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_SERIAL_BUFFER_SIZE = 30 + // DEVICE_VBIOS_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_VBIOS_VERSION_BUFFER_SIZE = 32 + // AFFINITY_SCOPE_NODE as defined in nvml/nvml.h + AFFINITY_SCOPE_NODE = 0 + // AFFINITY_SCOPE_SOCKET as defined in nvml/nvml.h + AFFINITY_SCOPE_SOCKET = 1 + // DEVICE_MIG_DISABLE as defined in nvml/nvml.h + DEVICE_MIG_DISABLE = 0 + 
// DEVICE_MIG_ENABLE as defined in nvml/nvml.h + DEVICE_MIG_ENABLE = 1 + // GPU_INSTANCE_PROFILE_1_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE = 0 + // GPU_INSTANCE_PROFILE_2_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_2_SLICE = 1 + // GPU_INSTANCE_PROFILE_3_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_3_SLICE = 2 + // GPU_INSTANCE_PROFILE_4_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_4_SLICE = 3 + // GPU_INSTANCE_PROFILE_7_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_7_SLICE = 4 + // GPU_INSTANCE_PROFILE_8_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_8_SLICE = 5 + // GPU_INSTANCE_PROFILE_6_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_6_SLICE = 6 + // GPU_INSTANCE_PROFILE_1_SLICE_REV1 as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE_REV1 = 7 + // GPU_INSTANCE_PROFILE_2_SLICE_REV1 as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_2_SLICE_REV1 = 8 + // GPU_INSTANCE_PROFILE_1_SLICE_REV2 as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE_REV2 = 9 + // GPU_INSTANCE_PROFILE_COUNT as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_COUNT = 10 + // GPU_INTSTANCE_PROFILE_CAPS_P2P as defined in nvml/nvml.h + GPU_INTSTANCE_PROFILE_CAPS_P2P = 1 + // COMPUTE_INSTANCE_PROFILE_1_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_1_SLICE = 0 + // COMPUTE_INSTANCE_PROFILE_2_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_2_SLICE = 1 + // COMPUTE_INSTANCE_PROFILE_3_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_3_SLICE = 2 + // COMPUTE_INSTANCE_PROFILE_4_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_4_SLICE = 3 + // COMPUTE_INSTANCE_PROFILE_7_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_7_SLICE = 4 + // COMPUTE_INSTANCE_PROFILE_8_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_8_SLICE = 5 + // COMPUTE_INSTANCE_PROFILE_6_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_6_SLICE = 6 + // COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 = 7 + // COMPUTE_INSTANCE_PROFILE_COUNT as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_COUNT = 8 + // COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED as defined in nvml/nvml.h + COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED = 0 + // COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT as defined in nvml/nvml.h + COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT = 1 + // GPM_METRICS_GET_VERSION as defined in nvml/nvml.h + GPM_METRICS_GET_VERSION = 1 + // GPM_SUPPORT_VERSION as defined in nvml/nvml.h + GPM_SUPPORT_VERSION = 1 + // NVLINK_POWER_STATE_HIGH_SPEED as defined in nvml/nvml.h + NVLINK_POWER_STATE_HIGH_SPEED = 0 + // NVLINK_POWER_STATE_LOW as defined in nvml/nvml.h + NVLINK_POWER_STATE_LOW = 1 + // NVLINK_LOW_POWER_THRESHOLD_MIN as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_MIN = 1 + // NVLINK_LOW_POWER_THRESHOLD_MAX as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_MAX = 8191 + // NVLINK_LOW_POWER_THRESHOLD_RESET as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_RESET = 4294967295 +) + +// BridgeChipType as declared in nvml/nvml.h +type BridgeChipType int32 + +// BridgeChipType enumeration from nvml/nvml.h +const ( + BRIDGE_CHIP_PLX BridgeChipType = iota + BRIDGE_CHIP_BRO4 BridgeChipType = 1 +) + +// NvLinkUtilizationCountUnits as declared in nvml/nvml.h +type NvLinkUtilizationCountUnits int32 + +// NvLinkUtilizationCountUnits enumeration from nvml/nvml.h +const ( + NVLINK_COUNTER_UNIT_CYCLES NvLinkUtilizationCountUnits = iota + NVLINK_COUNTER_UNIT_PACKETS 
NvLinkUtilizationCountUnits = 1 + NVLINK_COUNTER_UNIT_BYTES NvLinkUtilizationCountUnits = 2 + NVLINK_COUNTER_UNIT_RESERVED NvLinkUtilizationCountUnits = 3 + NVLINK_COUNTER_UNIT_COUNT NvLinkUtilizationCountUnits = 4 +) + +// NvLinkUtilizationCountPktTypes as declared in nvml/nvml.h +type NvLinkUtilizationCountPktTypes int32 + +// NvLinkUtilizationCountPktTypes enumeration from nvml/nvml.h +const ( + NVLINK_COUNTER_PKTFILTER_NOP NvLinkUtilizationCountPktTypes = 1 + NVLINK_COUNTER_PKTFILTER_READ NvLinkUtilizationCountPktTypes = 2 + NVLINK_COUNTER_PKTFILTER_WRITE NvLinkUtilizationCountPktTypes = 4 + NVLINK_COUNTER_PKTFILTER_RATOM NvLinkUtilizationCountPktTypes = 8 + NVLINK_COUNTER_PKTFILTER_NRATOM NvLinkUtilizationCountPktTypes = 16 + NVLINK_COUNTER_PKTFILTER_FLUSH NvLinkUtilizationCountPktTypes = 32 + NVLINK_COUNTER_PKTFILTER_RESPDATA NvLinkUtilizationCountPktTypes = 64 + NVLINK_COUNTER_PKTFILTER_RESPNODATA NvLinkUtilizationCountPktTypes = 128 + NVLINK_COUNTER_PKTFILTER_ALL NvLinkUtilizationCountPktTypes = 255 +) + +// NvLinkCapability as declared in nvml/nvml.h +type NvLinkCapability int32 + +// NvLinkCapability enumeration from nvml/nvml.h +const ( + NVLINK_CAP_P2P_SUPPORTED NvLinkCapability = iota + NVLINK_CAP_SYSMEM_ACCESS NvLinkCapability = 1 + NVLINK_CAP_P2P_ATOMICS NvLinkCapability = 2 + NVLINK_CAP_SYSMEM_ATOMICS NvLinkCapability = 3 + NVLINK_CAP_SLI_BRIDGE NvLinkCapability = 4 + NVLINK_CAP_VALID NvLinkCapability = 5 + NVLINK_CAP_COUNT NvLinkCapability = 6 +) + +// NvLinkErrorCounter as declared in nvml/nvml.h +type NvLinkErrorCounter int32 + +// NvLinkErrorCounter enumeration from nvml/nvml.h +const ( + NVLINK_ERROR_DL_REPLAY NvLinkErrorCounter = iota + NVLINK_ERROR_DL_RECOVERY NvLinkErrorCounter = 1 + NVLINK_ERROR_DL_CRC_FLIT NvLinkErrorCounter = 2 + NVLINK_ERROR_DL_CRC_DATA NvLinkErrorCounter = 3 + NVLINK_ERROR_DL_ECC_DATA NvLinkErrorCounter = 4 + NVLINK_ERROR_COUNT NvLinkErrorCounter = 5 +) + +// IntNvLinkDeviceType as declared in nvml/nvml.h +type IntNvLinkDeviceType int32 + +// IntNvLinkDeviceType enumeration from nvml/nvml.h +const ( + NVLINK_DEVICE_TYPE_GPU IntNvLinkDeviceType = iota + NVLINK_DEVICE_TYPE_IBMNPU IntNvLinkDeviceType = 1 + NVLINK_DEVICE_TYPE_SWITCH IntNvLinkDeviceType = 2 + NVLINK_DEVICE_TYPE_UNKNOWN IntNvLinkDeviceType = 255 +) + +// GpuTopologyLevel as declared in nvml/nvml.h +type GpuTopologyLevel int32 + +// GpuTopologyLevel enumeration from nvml/nvml.h +const ( + TOPOLOGY_INTERNAL GpuTopologyLevel = iota + TOPOLOGY_SINGLE GpuTopologyLevel = 10 + TOPOLOGY_MULTIPLE GpuTopologyLevel = 20 + TOPOLOGY_HOSTBRIDGE GpuTopologyLevel = 30 + TOPOLOGY_NODE GpuTopologyLevel = 40 + TOPOLOGY_SYSTEM GpuTopologyLevel = 50 +) + +// GpuP2PStatus as declared in nvml/nvml.h +type GpuP2PStatus int32 + +// GpuP2PStatus enumeration from nvml/nvml.h +const ( + P2P_STATUS_OK GpuP2PStatus = iota + P2P_STATUS_CHIPSET_NOT_SUPPORED GpuP2PStatus = 1 + P2P_STATUS_CHIPSET_NOT_SUPPORTED GpuP2PStatus = 1 + P2P_STATUS_GPU_NOT_SUPPORTED GpuP2PStatus = 2 + P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED GpuP2PStatus = 3 + P2P_STATUS_DISABLED_BY_REGKEY GpuP2PStatus = 4 + P2P_STATUS_NOT_SUPPORTED GpuP2PStatus = 5 + P2P_STATUS_UNKNOWN GpuP2PStatus = 6 +) + +// GpuP2PCapsIndex as declared in nvml/nvml.h +type GpuP2PCapsIndex int32 + +// GpuP2PCapsIndex enumeration from nvml/nvml.h +const ( + P2P_CAPS_INDEX_READ GpuP2PCapsIndex = iota + P2P_CAPS_INDEX_WRITE GpuP2PCapsIndex = 1 + P2P_CAPS_INDEX_NVLINK GpuP2PCapsIndex = 2 + P2P_CAPS_INDEX_ATOMICS GpuP2PCapsIndex = 3 + P2P_CAPS_INDEX_PCI GpuP2PCapsIndex = 4 + 
P2P_CAPS_INDEX_PROP GpuP2PCapsIndex = 4 + P2P_CAPS_INDEX_UNKNOWN GpuP2PCapsIndex = 5 +) + +// SamplingType as declared in nvml/nvml.h +type SamplingType int32 + +// SamplingType enumeration from nvml/nvml.h +const ( + TOTAL_POWER_SAMPLES SamplingType = iota + GPU_UTILIZATION_SAMPLES SamplingType = 1 + MEMORY_UTILIZATION_SAMPLES SamplingType = 2 + ENC_UTILIZATION_SAMPLES SamplingType = 3 + DEC_UTILIZATION_SAMPLES SamplingType = 4 + PROCESSOR_CLK_SAMPLES SamplingType = 5 + MEMORY_CLK_SAMPLES SamplingType = 6 + MODULE_POWER_SAMPLES SamplingType = 7 + JPG_UTILIZATION_SAMPLES SamplingType = 8 + OFA_UTILIZATION_SAMPLES SamplingType = 9 + SAMPLINGTYPE_COUNT SamplingType = 10 +) + +// PcieUtilCounter as declared in nvml/nvml.h +type PcieUtilCounter int32 + +// PcieUtilCounter enumeration from nvml/nvml.h +const ( + PCIE_UTIL_TX_BYTES PcieUtilCounter = iota + PCIE_UTIL_RX_BYTES PcieUtilCounter = 1 + PCIE_UTIL_COUNT PcieUtilCounter = 2 +) + +// ValueType as declared in nvml/nvml.h +type ValueType int32 + +// ValueType enumeration from nvml/nvml.h +const ( + VALUE_TYPE_DOUBLE ValueType = iota + VALUE_TYPE_UNSIGNED_INT ValueType = 1 + VALUE_TYPE_UNSIGNED_LONG ValueType = 2 + VALUE_TYPE_UNSIGNED_LONG_LONG ValueType = 3 + VALUE_TYPE_SIGNED_LONG_LONG ValueType = 4 + VALUE_TYPE_SIGNED_INT ValueType = 5 + VALUE_TYPE_COUNT ValueType = 6 +) + +// PerfPolicyType as declared in nvml/nvml.h +type PerfPolicyType int32 + +// PerfPolicyType enumeration from nvml/nvml.h +const ( + PERF_POLICY_POWER PerfPolicyType = iota + PERF_POLICY_THERMAL PerfPolicyType = 1 + PERF_POLICY_SYNC_BOOST PerfPolicyType = 2 + PERF_POLICY_BOARD_LIMIT PerfPolicyType = 3 + PERF_POLICY_LOW_UTILIZATION PerfPolicyType = 4 + PERF_POLICY_RELIABILITY PerfPolicyType = 5 + PERF_POLICY_TOTAL_APP_CLOCKS PerfPolicyType = 10 + PERF_POLICY_TOTAL_BASE_CLOCKS PerfPolicyType = 11 + PERF_POLICY_COUNT PerfPolicyType = 12 +) + +// EnableState as declared in nvml/nvml.h +type EnableState int32 + +// EnableState enumeration from nvml/nvml.h +const ( + FEATURE_DISABLED EnableState = iota + FEATURE_ENABLED EnableState = 1 +) + +// BrandType as declared in nvml/nvml.h +type BrandType int32 + +// BrandType enumeration from nvml/nvml.h +const ( + BRAND_UNKNOWN BrandType = iota + BRAND_QUADRO BrandType = 1 + BRAND_TESLA BrandType = 2 + BRAND_NVS BrandType = 3 + BRAND_GRID BrandType = 4 + BRAND_GEFORCE BrandType = 5 + BRAND_TITAN BrandType = 6 + BRAND_NVIDIA_VAPPS BrandType = 7 + BRAND_NVIDIA_VPC BrandType = 8 + BRAND_NVIDIA_VCS BrandType = 9 + BRAND_NVIDIA_VWS BrandType = 10 + BRAND_NVIDIA_CLOUD_GAMING BrandType = 11 + BRAND_NVIDIA_VGAMING BrandType = 11 + BRAND_QUADRO_RTX BrandType = 12 + BRAND_NVIDIA_RTX BrandType = 13 + BRAND_NVIDIA BrandType = 14 + BRAND_GEFORCE_RTX BrandType = 15 + BRAND_TITAN_RTX BrandType = 16 + BRAND_COUNT BrandType = 17 +) + +// TemperatureThresholds as declared in nvml/nvml.h +type TemperatureThresholds int32 + +// TemperatureThresholds enumeration from nvml/nvml.h +const ( + TEMPERATURE_THRESHOLD_SHUTDOWN TemperatureThresholds = iota + TEMPERATURE_THRESHOLD_SLOWDOWN TemperatureThresholds = 1 + TEMPERATURE_THRESHOLD_MEM_MAX TemperatureThresholds = 2 + TEMPERATURE_THRESHOLD_GPU_MAX TemperatureThresholds = 3 + TEMPERATURE_THRESHOLD_ACOUSTIC_MIN TemperatureThresholds = 4 + TEMPERATURE_THRESHOLD_ACOUSTIC_CURR TemperatureThresholds = 5 + TEMPERATURE_THRESHOLD_ACOUSTIC_MAX TemperatureThresholds = 6 + TEMPERATURE_THRESHOLD_COUNT TemperatureThresholds = 7 +) + +// TemperatureSensors as declared in nvml/nvml.h +type TemperatureSensors int32 + +// 
TemperatureSensors enumeration from nvml/nvml.h +const ( + TEMPERATURE_GPU TemperatureSensors = iota + TEMPERATURE_COUNT TemperatureSensors = 1 +) + +// ComputeMode as declared in nvml/nvml.h +type ComputeMode int32 + +// ComputeMode enumeration from nvml/nvml.h +const ( + COMPUTEMODE_DEFAULT ComputeMode = iota + COMPUTEMODE_EXCLUSIVE_THREAD ComputeMode = 1 + COMPUTEMODE_PROHIBITED ComputeMode = 2 + COMPUTEMODE_EXCLUSIVE_PROCESS ComputeMode = 3 + COMPUTEMODE_COUNT ComputeMode = 4 +) + +// MemoryErrorType as declared in nvml/nvml.h +type MemoryErrorType int32 + +// MemoryErrorType enumeration from nvml/nvml.h +const ( + MEMORY_ERROR_TYPE_CORRECTED MemoryErrorType = iota + MEMORY_ERROR_TYPE_UNCORRECTED MemoryErrorType = 1 + MEMORY_ERROR_TYPE_COUNT MemoryErrorType = 2 +) + +// EccCounterType as declared in nvml/nvml.h +type EccCounterType int32 + +// EccCounterType enumeration from nvml/nvml.h +const ( + VOLATILE_ECC EccCounterType = iota + AGGREGATE_ECC EccCounterType = 1 + ECC_COUNTER_TYPE_COUNT EccCounterType = 2 +) + +// ClockType as declared in nvml/nvml.h +type ClockType int32 + +// ClockType enumeration from nvml/nvml.h +const ( + CLOCK_GRAPHICS ClockType = iota + CLOCK_SM ClockType = 1 + CLOCK_MEM ClockType = 2 + CLOCK_VIDEO ClockType = 3 + CLOCK_COUNT ClockType = 4 +) + +// ClockId as declared in nvml/nvml.h +type ClockId int32 + +// ClockId enumeration from nvml/nvml.h +const ( + CLOCK_ID_CURRENT ClockId = iota + CLOCK_ID_APP_CLOCK_TARGET ClockId = 1 + CLOCK_ID_APP_CLOCK_DEFAULT ClockId = 2 + CLOCK_ID_CUSTOMER_BOOST_MAX ClockId = 3 + CLOCK_ID_COUNT ClockId = 4 +) + +// DriverModel as declared in nvml/nvml.h +type DriverModel int32 + +// DriverModel enumeration from nvml/nvml.h +const ( + DRIVER_WDDM DriverModel = iota + DRIVER_WDM DriverModel = 1 +) + +// Pstates as declared in nvml/nvml.h +type Pstates int32 + +// Pstates enumeration from nvml/nvml.h +const ( + PSTATE_0 Pstates = iota + PSTATE_1 Pstates = 1 + PSTATE_2 Pstates = 2 + PSTATE_3 Pstates = 3 + PSTATE_4 Pstates = 4 + PSTATE_5 Pstates = 5 + PSTATE_6 Pstates = 6 + PSTATE_7 Pstates = 7 + PSTATE_8 Pstates = 8 + PSTATE_9 Pstates = 9 + PSTATE_10 Pstates = 10 + PSTATE_11 Pstates = 11 + PSTATE_12 Pstates = 12 + PSTATE_13 Pstates = 13 + PSTATE_14 Pstates = 14 + PSTATE_15 Pstates = 15 + PSTATE_UNKNOWN Pstates = 32 +) + +// GpuOperationMode as declared in nvml/nvml.h +type GpuOperationMode int32 + +// GpuOperationMode enumeration from nvml/nvml.h +const ( + GOM_ALL_ON GpuOperationMode = iota + GOM_COMPUTE GpuOperationMode = 1 + GOM_LOW_DP GpuOperationMode = 2 +) + +// InforomObject as declared in nvml/nvml.h +type InforomObject int32 + +// InforomObject enumeration from nvml/nvml.h +const ( + INFOROM_OEM InforomObject = iota + INFOROM_ECC InforomObject = 1 + INFOROM_POWER InforomObject = 2 + INFOROM_COUNT InforomObject = 3 +) + +// Return as declared in nvml/nvml.h +type Return int32 + +// Return enumeration from nvml/nvml.h +const ( + SUCCESS Return = iota + ERROR_UNINITIALIZED Return = 1 + ERROR_INVALID_ARGUMENT Return = 2 + ERROR_NOT_SUPPORTED Return = 3 + ERROR_NO_PERMISSION Return = 4 + ERROR_ALREADY_INITIALIZED Return = 5 + ERROR_NOT_FOUND Return = 6 + ERROR_INSUFFICIENT_SIZE Return = 7 + ERROR_INSUFFICIENT_POWER Return = 8 + ERROR_DRIVER_NOT_LOADED Return = 9 + ERROR_TIMEOUT Return = 10 + ERROR_IRQ_ISSUE Return = 11 + ERROR_LIBRARY_NOT_FOUND Return = 12 + ERROR_FUNCTION_NOT_FOUND Return = 13 + ERROR_CORRUPTED_INFOROM Return = 14 + ERROR_GPU_IS_LOST Return = 15 + ERROR_RESET_REQUIRED Return = 16 + ERROR_OPERATING_SYSTEM Return 
= 17 + ERROR_LIB_RM_VERSION_MISMATCH Return = 18 + ERROR_IN_USE Return = 19 + ERROR_MEMORY Return = 20 + ERROR_NO_DATA Return = 21 + ERROR_VGPU_ECC_NOT_SUPPORTED Return = 22 + ERROR_INSUFFICIENT_RESOURCES Return = 23 + ERROR_FREQ_NOT_SUPPORTED Return = 24 + ERROR_ARGUMENT_VERSION_MISMATCH Return = 25 + ERROR_DEPRECATED Return = 26 + ERROR_NOT_READY Return = 27 + ERROR_GPU_NOT_FOUND Return = 28 + ERROR_INVALID_STATE Return = 29 + ERROR_UNKNOWN Return = 999 +) + +// MemoryLocation as declared in nvml/nvml.h +type MemoryLocation int32 + +// MemoryLocation enumeration from nvml/nvml.h +const ( + MEMORY_LOCATION_L1_CACHE MemoryLocation = iota + MEMORY_LOCATION_L2_CACHE MemoryLocation = 1 + MEMORY_LOCATION_DRAM MemoryLocation = 2 + MEMORY_LOCATION_DEVICE_MEMORY MemoryLocation = 2 + MEMORY_LOCATION_REGISTER_FILE MemoryLocation = 3 + MEMORY_LOCATION_TEXTURE_MEMORY MemoryLocation = 4 + MEMORY_LOCATION_TEXTURE_SHM MemoryLocation = 5 + MEMORY_LOCATION_CBU MemoryLocation = 6 + MEMORY_LOCATION_SRAM MemoryLocation = 7 + MEMORY_LOCATION_COUNT MemoryLocation = 8 +) + +// PageRetirementCause as declared in nvml/nvml.h +type PageRetirementCause int32 + +// PageRetirementCause enumeration from nvml/nvml.h +const ( + PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS PageRetirementCause = iota + PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR PageRetirementCause = 1 + PAGE_RETIREMENT_CAUSE_COUNT PageRetirementCause = 2 +) + +// RestrictedAPI as declared in nvml/nvml.h +type RestrictedAPI int32 + +// RestrictedAPI enumeration from nvml/nvml.h +const ( + RESTRICTED_API_SET_APPLICATION_CLOCKS RestrictedAPI = iota + RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS RestrictedAPI = 1 + RESTRICTED_API_COUNT RestrictedAPI = 2 +) + +// GpuVirtualizationMode as declared in nvml/nvml.h +type GpuVirtualizationMode int32 + +// GpuVirtualizationMode enumeration from nvml/nvml.h +const ( + GPU_VIRTUALIZATION_MODE_NONE GpuVirtualizationMode = iota + GPU_VIRTUALIZATION_MODE_PASSTHROUGH GpuVirtualizationMode = 1 + GPU_VIRTUALIZATION_MODE_VGPU GpuVirtualizationMode = 2 + GPU_VIRTUALIZATION_MODE_HOST_VGPU GpuVirtualizationMode = 3 + GPU_VIRTUALIZATION_MODE_HOST_VSGA GpuVirtualizationMode = 4 +) + +// HostVgpuMode as declared in nvml/nvml.h +type HostVgpuMode int32 + +// HostVgpuMode enumeration from nvml/nvml.h +const ( + HOST_VGPU_MODE_NON_SRIOV HostVgpuMode = iota + HOST_VGPU_MODE_SRIOV HostVgpuMode = 1 +) + +// VgpuVmIdType as declared in nvml/nvml.h +type VgpuVmIdType int32 + +// VgpuVmIdType enumeration from nvml/nvml.h +const ( + VGPU_VM_ID_DOMAIN_ID VgpuVmIdType = iota + VGPU_VM_ID_UUID VgpuVmIdType = 1 +) + +// VgpuGuestInfoState as declared in nvml/nvml.h +type VgpuGuestInfoState int32 + +// VgpuGuestInfoState enumeration from nvml/nvml.h +const ( + VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED VgpuGuestInfoState = iota + VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED VgpuGuestInfoState = 1 +) + +// VgpuCapability as declared in nvml/nvml.h +type VgpuCapability int32 + +// VgpuCapability enumeration from nvml/nvml.h +const ( + VGPU_CAP_NVLINK_P2P VgpuCapability = iota + VGPU_CAP_GPUDIRECT VgpuCapability = 1 + VGPU_CAP_MULTI_VGPU_EXCLUSIVE VgpuCapability = 2 + VGPU_CAP_EXCLUSIVE_TYPE VgpuCapability = 3 + VGPU_CAP_EXCLUSIVE_SIZE VgpuCapability = 4 + VGPU_CAP_COUNT VgpuCapability = 5 +) + +// VgpuDriverCapability as declared in nvml/nvml.h +type VgpuDriverCapability int32 + +// VgpuDriverCapability enumeration from nvml/nvml.h +const ( + VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU VgpuDriverCapability = iota + VGPU_DRIVER_CAP_COUNT 
VgpuDriverCapability = 1 +) + +// DeviceVgpuCapability as declared in nvml/nvml.h +type DeviceVgpuCapability int32 + +// DeviceVgpuCapability enumeration from nvml/nvml.h +const ( + DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU DeviceVgpuCapability = iota + DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES DeviceVgpuCapability = 1 + DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES DeviceVgpuCapability = 2 + DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW DeviceVgpuCapability = 3 + DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW DeviceVgpuCapability = 4 + DEVICE_VGPU_CAP_DEVICE_STREAMING DeviceVgpuCapability = 5 + DEVICE_VGPU_CAP_MINI_QUARTER_GPU DeviceVgpuCapability = 6 + DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU DeviceVgpuCapability = 7 + DEVICE_VGPU_CAP_COUNT DeviceVgpuCapability = 8 +) + +// GpuUtilizationDomainId as declared in nvml/nvml.h +type GpuUtilizationDomainId int32 + +// GpuUtilizationDomainId enumeration from nvml/nvml.h +const ( + GPU_UTILIZATION_DOMAIN_GPU GpuUtilizationDomainId = iota + GPU_UTILIZATION_DOMAIN_FB GpuUtilizationDomainId = 1 + GPU_UTILIZATION_DOMAIN_VID GpuUtilizationDomainId = 2 + GPU_UTILIZATION_DOMAIN_BUS GpuUtilizationDomainId = 3 +) + +// FanState as declared in nvml/nvml.h +type FanState int32 + +// FanState enumeration from nvml/nvml.h +const ( + FAN_NORMAL FanState = iota + FAN_FAILED FanState = 1 +) + +// LedColor as declared in nvml/nvml.h +type LedColor int32 + +// LedColor enumeration from nvml/nvml.h +const ( + LED_COLOR_GREEN LedColor = iota + LED_COLOR_AMBER LedColor = 1 +) + +// EncoderType as declared in nvml/nvml.h +type EncoderType int32 + +// EncoderType enumeration from nvml/nvml.h +const ( + ENCODER_QUERY_H264 EncoderType = iota + ENCODER_QUERY_HEVC EncoderType = 1 + ENCODER_QUERY_AV1 EncoderType = 2 + ENCODER_QUERY_UNKNOWN EncoderType = 255 +) + +// FBCSessionType as declared in nvml/nvml.h +type FBCSessionType int32 + +// FBCSessionType enumeration from nvml/nvml.h +const ( + FBC_SESSION_TYPE_UNKNOWN FBCSessionType = iota + FBC_SESSION_TYPE_TOSYS FBCSessionType = 1 + FBC_SESSION_TYPE_CUDA FBCSessionType = 2 + FBC_SESSION_TYPE_VID FBCSessionType = 3 + FBC_SESSION_TYPE_HWENC FBCSessionType = 4 +) + +// DetachGpuState as declared in nvml/nvml.h +type DetachGpuState int32 + +// DetachGpuState enumeration from nvml/nvml.h +const ( + DETACH_GPU_KEEP DetachGpuState = iota + DETACH_GPU_REMOVE DetachGpuState = 1 +) + +// PcieLinkState as declared in nvml/nvml.h +type PcieLinkState int32 + +// PcieLinkState enumeration from nvml/nvml.h +const ( + PCIE_LINK_KEEP PcieLinkState = iota + PCIE_LINK_SHUT_DOWN PcieLinkState = 1 +) + +// ClockLimitId as declared in nvml/nvml.h +type ClockLimitId int32 + +// ClockLimitId enumeration from nvml/nvml.h +const ( + CLOCK_LIMIT_ID_RANGE_START ClockLimitId = -256 + CLOCK_LIMIT_ID_TDP ClockLimitId = -255 + CLOCK_LIMIT_ID_UNLIMITED ClockLimitId = -254 +) + +// VgpuVmCompatibility as declared in nvml/nvml.h +type VgpuVmCompatibility int32 + +// VgpuVmCompatibility enumeration from nvml/nvml.h +const ( + VGPU_VM_COMPATIBILITY_NONE VgpuVmCompatibility = iota + VGPU_VM_COMPATIBILITY_COLD VgpuVmCompatibility = 1 + VGPU_VM_COMPATIBILITY_HIBERNATE VgpuVmCompatibility = 2 + VGPU_VM_COMPATIBILITY_SLEEP VgpuVmCompatibility = 4 + VGPU_VM_COMPATIBILITY_LIVE VgpuVmCompatibility = 8 +) + +// VgpuPgpuCompatibilityLimitCode as declared in nvml/nvml.h +type VgpuPgpuCompatibilityLimitCode int32 + +// VgpuPgpuCompatibilityLimitCode enumeration from nvml/nvml.h +const ( + VGPU_COMPATIBILITY_LIMIT_NONE VgpuPgpuCompatibilityLimitCode = iota + 
VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER VgpuPgpuCompatibilityLimitCode = 1 + VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER VgpuPgpuCompatibilityLimitCode = 2 + VGPU_COMPATIBILITY_LIMIT_GPU VgpuPgpuCompatibilityLimitCode = 4 + VGPU_COMPATIBILITY_LIMIT_OTHER VgpuPgpuCompatibilityLimitCode = -2147483648 +) + +// ThermalTarget as declared in nvml/nvml.h +type ThermalTarget int32 + +// ThermalTarget enumeration from nvml/nvml.h +const ( + THERMAL_TARGET_NONE ThermalTarget = iota + THERMAL_TARGET_GPU ThermalTarget = 1 + THERMAL_TARGET_MEMORY ThermalTarget = 2 + THERMAL_TARGET_POWER_SUPPLY ThermalTarget = 4 + THERMAL_TARGET_BOARD ThermalTarget = 8 + THERMAL_TARGET_VCD_BOARD ThermalTarget = 9 + THERMAL_TARGET_VCD_INLET ThermalTarget = 10 + THERMAL_TARGET_VCD_OUTLET ThermalTarget = 11 + THERMAL_TARGET_ALL ThermalTarget = 15 + THERMAL_TARGET_UNKNOWN ThermalTarget = -1 +) + +// ThermalController as declared in nvml/nvml.h +type ThermalController int32 + +// ThermalController enumeration from nvml/nvml.h +const ( + THERMAL_CONTROLLER_NONE ThermalController = iota + THERMAL_CONTROLLER_GPU_INTERNAL ThermalController = 1 + THERMAL_CONTROLLER_ADM1032 ThermalController = 2 + THERMAL_CONTROLLER_ADT7461 ThermalController = 3 + THERMAL_CONTROLLER_MAX6649 ThermalController = 4 + THERMAL_CONTROLLER_MAX1617 ThermalController = 5 + THERMAL_CONTROLLER_LM99 ThermalController = 6 + THERMAL_CONTROLLER_LM89 ThermalController = 7 + THERMAL_CONTROLLER_LM64 ThermalController = 8 + THERMAL_CONTROLLER_G781 ThermalController = 9 + THERMAL_CONTROLLER_ADT7473 ThermalController = 10 + THERMAL_CONTROLLER_SBMAX6649 ThermalController = 11 + THERMAL_CONTROLLER_VBIOSEVT ThermalController = 12 + THERMAL_CONTROLLER_OS ThermalController = 13 + THERMAL_CONTROLLER_NVSYSCON_CANOAS ThermalController = 14 + THERMAL_CONTROLLER_NVSYSCON_E551 ThermalController = 15 + THERMAL_CONTROLLER_MAX6649R ThermalController = 16 + THERMAL_CONTROLLER_ADT7473S ThermalController = 17 + THERMAL_CONTROLLER_UNKNOWN ThermalController = -1 +) + +// GridLicenseFeatureCode as declared in nvml/nvml.h +type GridLicenseFeatureCode int32 + +// GridLicenseFeatureCode enumeration from nvml/nvml.h +const ( + GRID_LICENSE_FEATURE_CODE_UNKNOWN GridLicenseFeatureCode = iota + GRID_LICENSE_FEATURE_CODE_VGPU GridLicenseFeatureCode = 1 + GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX GridLicenseFeatureCode = 2 + GRID_LICENSE_FEATURE_CODE_VWORKSTATION GridLicenseFeatureCode = 2 + GRID_LICENSE_FEATURE_CODE_GAMING GridLicenseFeatureCode = 3 + GRID_LICENSE_FEATURE_CODE_COMPUTE GridLicenseFeatureCode = 4 +) + +// GpmMetricId as declared in nvml/nvml.h +type GpmMetricId int32 + +// GpmMetricId enumeration from nvml/nvml.h +const ( + GPM_METRIC_GRAPHICS_UTIL GpmMetricId = 1 + GPM_METRIC_SM_UTIL GpmMetricId = 2 + GPM_METRIC_SM_OCCUPANCY GpmMetricId = 3 + GPM_METRIC_INTEGER_UTIL GpmMetricId = 4 + GPM_METRIC_ANY_TENSOR_UTIL GpmMetricId = 5 + GPM_METRIC_DFMA_TENSOR_UTIL GpmMetricId = 6 + GPM_METRIC_HMMA_TENSOR_UTIL GpmMetricId = 7 + GPM_METRIC_IMMA_TENSOR_UTIL GpmMetricId = 9 + GPM_METRIC_DRAM_BW_UTIL GpmMetricId = 10 + GPM_METRIC_FP64_UTIL GpmMetricId = 11 + GPM_METRIC_FP32_UTIL GpmMetricId = 12 + GPM_METRIC_FP16_UTIL GpmMetricId = 13 + GPM_METRIC_PCIE_TX_PER_SEC GpmMetricId = 20 + GPM_METRIC_PCIE_RX_PER_SEC GpmMetricId = 21 + GPM_METRIC_NVDEC_0_UTIL GpmMetricId = 30 + GPM_METRIC_NVDEC_1_UTIL GpmMetricId = 31 + GPM_METRIC_NVDEC_2_UTIL GpmMetricId = 32 + GPM_METRIC_NVDEC_3_UTIL GpmMetricId = 33 + GPM_METRIC_NVDEC_4_UTIL GpmMetricId = 34 + GPM_METRIC_NVDEC_5_UTIL GpmMetricId = 35 + GPM_METRIC_NVDEC_6_UTIL 
GpmMetricId = 36 + GPM_METRIC_NVDEC_7_UTIL GpmMetricId = 37 + GPM_METRIC_NVJPG_0_UTIL GpmMetricId = 40 + GPM_METRIC_NVJPG_1_UTIL GpmMetricId = 41 + GPM_METRIC_NVJPG_2_UTIL GpmMetricId = 42 + GPM_METRIC_NVJPG_3_UTIL GpmMetricId = 43 + GPM_METRIC_NVJPG_4_UTIL GpmMetricId = 44 + GPM_METRIC_NVJPG_5_UTIL GpmMetricId = 45 + GPM_METRIC_NVJPG_6_UTIL GpmMetricId = 46 + GPM_METRIC_NVJPG_7_UTIL GpmMetricId = 47 + GPM_METRIC_NVOFA_0_UTIL GpmMetricId = 50 + GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC GpmMetricId = 60 + GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC GpmMetricId = 61 + GPM_METRIC_NVLINK_L0_RX_PER_SEC GpmMetricId = 62 + GPM_METRIC_NVLINK_L0_TX_PER_SEC GpmMetricId = 63 + GPM_METRIC_NVLINK_L1_RX_PER_SEC GpmMetricId = 64 + GPM_METRIC_NVLINK_L1_TX_PER_SEC GpmMetricId = 65 + GPM_METRIC_NVLINK_L2_RX_PER_SEC GpmMetricId = 66 + GPM_METRIC_NVLINK_L2_TX_PER_SEC GpmMetricId = 67 + GPM_METRIC_NVLINK_L3_RX_PER_SEC GpmMetricId = 68 + GPM_METRIC_NVLINK_L3_TX_PER_SEC GpmMetricId = 69 + GPM_METRIC_NVLINK_L4_RX_PER_SEC GpmMetricId = 70 + GPM_METRIC_NVLINK_L4_TX_PER_SEC GpmMetricId = 71 + GPM_METRIC_NVLINK_L5_RX_PER_SEC GpmMetricId = 72 + GPM_METRIC_NVLINK_L5_TX_PER_SEC GpmMetricId = 73 + GPM_METRIC_NVLINK_L6_RX_PER_SEC GpmMetricId = 74 + GPM_METRIC_NVLINK_L6_TX_PER_SEC GpmMetricId = 75 + GPM_METRIC_NVLINK_L7_RX_PER_SEC GpmMetricId = 76 + GPM_METRIC_NVLINK_L7_TX_PER_SEC GpmMetricId = 77 + GPM_METRIC_NVLINK_L8_RX_PER_SEC GpmMetricId = 78 + GPM_METRIC_NVLINK_L8_TX_PER_SEC GpmMetricId = 79 + GPM_METRIC_NVLINK_L9_RX_PER_SEC GpmMetricId = 80 + GPM_METRIC_NVLINK_L9_TX_PER_SEC GpmMetricId = 81 + GPM_METRIC_NVLINK_L10_RX_PER_SEC GpmMetricId = 82 + GPM_METRIC_NVLINK_L10_TX_PER_SEC GpmMetricId = 83 + GPM_METRIC_NVLINK_L11_RX_PER_SEC GpmMetricId = 84 + GPM_METRIC_NVLINK_L11_TX_PER_SEC GpmMetricId = 85 + GPM_METRIC_NVLINK_L12_RX_PER_SEC GpmMetricId = 86 + GPM_METRIC_NVLINK_L12_TX_PER_SEC GpmMetricId = 87 + GPM_METRIC_NVLINK_L13_RX_PER_SEC GpmMetricId = 88 + GPM_METRIC_NVLINK_L13_TX_PER_SEC GpmMetricId = 89 + GPM_METRIC_NVLINK_L14_RX_PER_SEC GpmMetricId = 90 + GPM_METRIC_NVLINK_L14_TX_PER_SEC GpmMetricId = 91 + GPM_METRIC_NVLINK_L15_RX_PER_SEC GpmMetricId = 92 + GPM_METRIC_NVLINK_L15_TX_PER_SEC GpmMetricId = 93 + GPM_METRIC_NVLINK_L16_RX_PER_SEC GpmMetricId = 94 + GPM_METRIC_NVLINK_L16_TX_PER_SEC GpmMetricId = 95 + GPM_METRIC_NVLINK_L17_RX_PER_SEC GpmMetricId = 96 + GPM_METRIC_NVLINK_L17_TX_PER_SEC GpmMetricId = 97 + GPM_METRIC_MAX GpmMetricId = 98 +) diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_static.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_static.go new file mode 100644 index 00000000000..9038b312d64 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_static.go @@ -0,0 +1,27 @@ +// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
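// Illustrative caller-side sketch, not part of the vendored go-nvml sources: it
// shows how the Return codes and enums declared in const.go above are typically
// consumed. Only package-level wrappers that go-nvml exposes elsewhere in this
// patch (Init, Shutdown, ErrorString, DeviceGetCount, DeviceGetHandleByIndex)
// and the Device.GetTemperature method are used; the program structure itself
// is hypothetical.

package main

import (
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	// Every NVML call reports a Return; anything other than SUCCESS is an error.
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("nvml.Init failed: %v", nvml.ErrorString(ret))
	}
	defer nvml.Shutdown()

	count, ret := nvml.DeviceGetCount()
	if ret != nvml.SUCCESS {
		log.Fatalf("DeviceGetCount failed: %v", nvml.ErrorString(ret))
	}
	for i := 0; i < count; i++ {
		device, ret := nvml.DeviceGetHandleByIndex(i)
		if ret != nvml.SUCCESS {
			continue // e.g. ERROR_GPU_IS_LOST or ERROR_NO_PERMISSION
		}
		if temp, ret := device.GetTemperature(nvml.TEMPERATURE_GPU); ret == nvml.SUCCESS {
			log.Printf("GPU %d: %d C", i, temp)
		}
	}
}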
+ +package nvml + +import ( + "reflect" +) + +const ( + SYSTEM_PROCESS_NAME_BUFFER_SIZE = 256 +) + +func STRUCT_VERSION(data interface{}, version uint32) uint32 { + return uint32(uint32(reflect.Indirect(reflect.ValueOf(data)).Type().Size()) | (version << uint32(24))) +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go new file mode 100644 index 00000000000..ac778e5abe1 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go @@ -0,0 +1,3057 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import ( + "fmt" + "reflect" + "unsafe" +) + +// nvmlDeviceHandle attempts to convert a device d to an nvmlDevice. +// This is required for functions such as GetTopologyCommonAncestor which +// accept Device arguments that need to be passed to internal nvml* functions +// as nvmlDevice parameters. +func nvmlDeviceHandle(d Device) nvmlDevice { + var helper func(val reflect.Value) nvmlDevice + helper = func(val reflect.Value) nvmlDevice { + if val.Kind() == reflect.Interface { + val = val.Elem() + } + + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + + if val.Type() == reflect.TypeOf(nvmlDevice{}) { + return val.Interface().(nvmlDevice) + } + + if val.Kind() != reflect.Struct { + panic(fmt.Errorf("unable to convert non-struct type %v to nvmlDevice", val.Kind())) + } + + for i := 0; i < val.Type().NumField(); i++ { + if !val.Type().Field(i).Anonymous { + continue + } + if !val.Field(i).Type().Implements(reflect.TypeOf((*Device)(nil)).Elem()) { + continue + } + return helper(val.Field(i)) + } + panic(fmt.Errorf("unable to convert %T to nvmlDevice", d)) + } + return helper(reflect.ValueOf(d)) +} + +// EccBitType +type EccBitType = MemoryErrorType + +// GpuInstanceInfo includes an interface type for Device instead of nvmlDevice +type GpuInstanceInfo struct { + Device Device + Id uint32 + ProfileId uint32 + Placement GpuInstancePlacement +} + +func (g GpuInstanceInfo) convert() nvmlGpuInstanceInfo { + out := nvmlGpuInstanceInfo{ + Device: g.Device.(nvmlDevice), + Id: g.Id, + ProfileId: g.ProfileId, + Placement: g.Placement, + } + return out +} + +func (g nvmlGpuInstanceInfo) convert() GpuInstanceInfo { + out := GpuInstanceInfo{ + Device: g.Device, + Id: g.Id, + ProfileId: g.ProfileId, + Placement: g.Placement, + } + return out +} + +// ComputeInstanceInfo includes an interface type for Device instead of nvmlDevice +type ComputeInstanceInfo struct { + Device Device + GpuInstance GpuInstance + Id uint32 + ProfileId uint32 + Placement ComputeInstancePlacement +} + +func (c ComputeInstanceInfo) convert() nvmlComputeInstanceInfo { + out := nvmlComputeInstanceInfo{ + Device: c.Device.(nvmlDevice), + GpuInstance: c.GpuInstance.(nvmlGpuInstance), + Id: c.Id, + ProfileId: c.ProfileId, + Placement: c.Placement, + } + return out +} + +func (c nvmlComputeInstanceInfo) convert() ComputeInstanceInfo { + out := 
ComputeInstanceInfo{ + Device: c.Device, + GpuInstance: c.GpuInstance, + Id: c.Id, + ProfileId: c.ProfileId, + Placement: c.Placement, + } + return out +} + +// nvml.DeviceGetCount() +func (l *library) DeviceGetCount() (int, Return) { + var deviceCount uint32 + ret := nvmlDeviceGetCount(&deviceCount) + return int(deviceCount), ret +} + +// nvml.DeviceGetHandleByIndex() +func (l *library) DeviceGetHandleByIndex(index int) (Device, Return) { + var device nvmlDevice + ret := nvmlDeviceGetHandleByIndex(uint32(index), &device) + return device, ret +} + +// nvml.DeviceGetHandleBySerial() +func (l *library) DeviceGetHandleBySerial(serial string) (Device, Return) { + var device nvmlDevice + ret := nvmlDeviceGetHandleBySerial(serial+string(rune(0)), &device) + return device, ret +} + +// nvml.DeviceGetHandleByUUID() +func (l *library) DeviceGetHandleByUUID(uuid string) (Device, Return) { + var device nvmlDevice + ret := nvmlDeviceGetHandleByUUID(uuid+string(rune(0)), &device) + return device, ret +} + +// nvml.DeviceGetHandleByPciBusId() +func (l *library) DeviceGetHandleByPciBusId(pciBusId string) (Device, Return) { + var device nvmlDevice + ret := nvmlDeviceGetHandleByPciBusId(pciBusId+string(rune(0)), &device) + return device, ret +} + +// nvml.DeviceGetName() +func (l *library) DeviceGetName(device Device) (string, Return) { + return device.GetName() +} + +func (device nvmlDevice) GetName() (string, Return) { + name := make([]byte, DEVICE_NAME_V2_BUFFER_SIZE) + ret := nvmlDeviceGetName(device, &name[0], DEVICE_NAME_V2_BUFFER_SIZE) + return string(name[:clen(name)]), ret +} + +// nvml.DeviceGetBrand() +func (l *library) DeviceGetBrand(device Device) (BrandType, Return) { + return device.GetBrand() +} + +func (device nvmlDevice) GetBrand() (BrandType, Return) { + var brandType BrandType + ret := nvmlDeviceGetBrand(device, &brandType) + return brandType, ret +} + +// nvml.DeviceGetIndex() +func (l *library) DeviceGetIndex(device Device) (int, Return) { + return device.GetIndex() +} + +func (device nvmlDevice) GetIndex() (int, Return) { + var index uint32 + ret := nvmlDeviceGetIndex(device, &index) + return int(index), ret +} + +// nvml.DeviceGetSerial() +func (l *library) DeviceGetSerial(device Device) (string, Return) { + return device.GetSerial() +} + +func (device nvmlDevice) GetSerial() (string, Return) { + serial := make([]byte, DEVICE_SERIAL_BUFFER_SIZE) + ret := nvmlDeviceGetSerial(device, &serial[0], DEVICE_SERIAL_BUFFER_SIZE) + return string(serial[:clen(serial)]), ret +} + +// nvml.DeviceGetCpuAffinity() +func (l *library) DeviceGetCpuAffinity(device Device, numCPUs int) ([]uint, Return) { + return device.GetCpuAffinity(numCPUs) +} + +func (device nvmlDevice) GetCpuAffinity(numCPUs int) ([]uint, Return) { + cpuSetSize := uint32((numCPUs-1)/int(unsafe.Sizeof(uint(0))) + 1) + cpuSet := make([]uint, cpuSetSize) + ret := nvmlDeviceGetCpuAffinity(device, cpuSetSize, &cpuSet[0]) + return cpuSet, ret +} + +// nvml.DeviceSetCpuAffinity() +func (l *library) DeviceSetCpuAffinity(device Device) Return { + return device.SetCpuAffinity() +} + +func (device nvmlDevice) SetCpuAffinity() Return { + return nvmlDeviceSetCpuAffinity(device) +} + +// nvml.DeviceClearCpuAffinity() +func (l *library) DeviceClearCpuAffinity(device Device) Return { + return device.ClearCpuAffinity() +} + +func (device nvmlDevice) ClearCpuAffinity() Return { + return nvmlDeviceClearCpuAffinity(device) +} + +// nvml.DeviceGetMemoryAffinity() +func (l *library) DeviceGetMemoryAffinity(device Device, numNodes int, scope 
AffinityScope) ([]uint, Return) { + return device.GetMemoryAffinity(numNodes, scope) +} + +func (device nvmlDevice) GetMemoryAffinity(numNodes int, scope AffinityScope) ([]uint, Return) { + nodeSetSize := uint32((numNodes-1)/int(unsafe.Sizeof(uint(0))) + 1) + nodeSet := make([]uint, nodeSetSize) + ret := nvmlDeviceGetMemoryAffinity(device, nodeSetSize, &nodeSet[0], scope) + return nodeSet, ret +} + +// nvml.DeviceGetCpuAffinityWithinScope() +func (l *library) DeviceGetCpuAffinityWithinScope(device Device, numCPUs int, scope AffinityScope) ([]uint, Return) { + return device.GetCpuAffinityWithinScope(numCPUs, scope) +} + +func (device nvmlDevice) GetCpuAffinityWithinScope(numCPUs int, scope AffinityScope) ([]uint, Return) { + cpuSetSize := uint32((numCPUs-1)/int(unsafe.Sizeof(uint(0))) + 1) + cpuSet := make([]uint, cpuSetSize) + ret := nvmlDeviceGetCpuAffinityWithinScope(device, cpuSetSize, &cpuSet[0], scope) + return cpuSet, ret +} + +// nvml.DeviceGetTopologyCommonAncestor() +func (l *library) DeviceGetTopologyCommonAncestor(device1 Device, device2 Device) (GpuTopologyLevel, Return) { + return device1.GetTopologyCommonAncestor(device2) +} + +func (device1 nvmlDevice) GetTopologyCommonAncestor(device2 Device) (GpuTopologyLevel, Return) { + var pathInfo GpuTopologyLevel + ret := nvmlDeviceGetTopologyCommonAncestorStub(device1, nvmlDeviceHandle(device2), &pathInfo) + return pathInfo, ret +} + +// nvmlDeviceGetTopologyCommonAncestorStub allows us to override this for testing. +var nvmlDeviceGetTopologyCommonAncestorStub = nvmlDeviceGetTopologyCommonAncestor + +// nvml.DeviceGetTopologyNearestGpus() +func (l *library) DeviceGetTopologyNearestGpus(device Device, level GpuTopologyLevel) ([]Device, Return) { + return device.GetTopologyNearestGpus(level) +} + +func (device nvmlDevice) GetTopologyNearestGpus(level GpuTopologyLevel) ([]Device, Return) { + var count uint32 + ret := nvmlDeviceGetTopologyNearestGpus(device, level, &count, nil) + if ret != SUCCESS { + return nil, ret + } + if count == 0 { + return []Device{}, ret + } + deviceArray := make([]nvmlDevice, count) + ret = nvmlDeviceGetTopologyNearestGpus(device, level, &count, &deviceArray[0]) + return convertSlice[nvmlDevice, Device](deviceArray), ret +} + +// nvml.DeviceGetP2PStatus() +func (l *library) DeviceGetP2PStatus(device1 Device, device2 Device, p2pIndex GpuP2PCapsIndex) (GpuP2PStatus, Return) { + return device1.GetP2PStatus(device2, p2pIndex) +} + +func (device1 nvmlDevice) GetP2PStatus(device2 Device, p2pIndex GpuP2PCapsIndex) (GpuP2PStatus, Return) { + var p2pStatus GpuP2PStatus + ret := nvmlDeviceGetP2PStatus(device1, nvmlDeviceHandle(device2), p2pIndex, &p2pStatus) + return p2pStatus, ret +} + +// nvml.DeviceGetUUID() +func (l *library) DeviceGetUUID(device Device) (string, Return) { + return device.GetUUID() +} + +func (device nvmlDevice) GetUUID() (string, Return) { + uuid := make([]byte, DEVICE_UUID_V2_BUFFER_SIZE) + ret := nvmlDeviceGetUUID(device, &uuid[0], DEVICE_UUID_V2_BUFFER_SIZE) + return string(uuid[:clen(uuid)]), ret +} + +// nvml.DeviceGetMinorNumber() +func (l *library) DeviceGetMinorNumber(device Device) (int, Return) { + return device.GetMinorNumber() +} + +func (device nvmlDevice) GetMinorNumber() (int, Return) { + var minorNumber uint32 + ret := nvmlDeviceGetMinorNumber(device, &minorNumber) + return int(minorNumber), ret +} + +// nvml.DeviceGetBoardPartNumber() +func (l *library) DeviceGetBoardPartNumber(device Device) (string, Return) { + return device.GetBoardPartNumber() +} + +func (device nvmlDevice) 
GetBoardPartNumber() (string, Return) { + partNumber := make([]byte, DEVICE_PART_NUMBER_BUFFER_SIZE) + ret := nvmlDeviceGetBoardPartNumber(device, &partNumber[0], DEVICE_PART_NUMBER_BUFFER_SIZE) + return string(partNumber[:clen(partNumber)]), ret +} + +// nvml.DeviceGetInforomVersion() +func (l *library) DeviceGetInforomVersion(device Device, object InforomObject) (string, Return) { + return device.GetInforomVersion(object) +} + +func (device nvmlDevice) GetInforomVersion(object InforomObject) (string, Return) { + version := make([]byte, DEVICE_INFOROM_VERSION_BUFFER_SIZE) + ret := nvmlDeviceGetInforomVersion(device, object, &version[0], DEVICE_INFOROM_VERSION_BUFFER_SIZE) + return string(version[:clen(version)]), ret +} + +// nvml.DeviceGetInforomImageVersion() +func (l *library) DeviceGetInforomImageVersion(device Device) (string, Return) { + return device.GetInforomImageVersion() +} + +func (device nvmlDevice) GetInforomImageVersion() (string, Return) { + version := make([]byte, DEVICE_INFOROM_VERSION_BUFFER_SIZE) + ret := nvmlDeviceGetInforomImageVersion(device, &version[0], DEVICE_INFOROM_VERSION_BUFFER_SIZE) + return string(version[:clen(version)]), ret +} + +// nvml.DeviceGetInforomConfigurationChecksum() +func (l *library) DeviceGetInforomConfigurationChecksum(device Device) (uint32, Return) { + return device.GetInforomConfigurationChecksum() +} + +func (device nvmlDevice) GetInforomConfigurationChecksum() (uint32, Return) { + var checksum uint32 + ret := nvmlDeviceGetInforomConfigurationChecksum(device, &checksum) + return checksum, ret +} + +// nvml.DeviceValidateInforom() +func (l *library) DeviceValidateInforom(device Device) Return { + return device.ValidateInforom() +} + +func (device nvmlDevice) ValidateInforom() Return { + return nvmlDeviceValidateInforom(device) +} + +// nvml.DeviceGetDisplayMode() +func (l *library) DeviceGetDisplayMode(device Device) (EnableState, Return) { + return device.GetDisplayMode() +} + +func (device nvmlDevice) GetDisplayMode() (EnableState, Return) { + var display EnableState + ret := nvmlDeviceGetDisplayMode(device, &display) + return display, ret +} + +// nvml.DeviceGetDisplayActive() +func (l *library) DeviceGetDisplayActive(device Device) (EnableState, Return) { + return device.GetDisplayActive() +} + +func (device nvmlDevice) GetDisplayActive() (EnableState, Return) { + var isActive EnableState + ret := nvmlDeviceGetDisplayActive(device, &isActive) + return isActive, ret +} + +// nvml.DeviceGetPersistenceMode() +func (l *library) DeviceGetPersistenceMode(device Device) (EnableState, Return) { + return device.GetPersistenceMode() +} + +func (device nvmlDevice) GetPersistenceMode() (EnableState, Return) { + var mode EnableState + ret := nvmlDeviceGetPersistenceMode(device, &mode) + return mode, ret +} + +// nvml.DeviceGetPciInfo() +func (l *library) DeviceGetPciInfo(device Device) (PciInfo, Return) { + return device.GetPciInfo() +} + +func (device nvmlDevice) GetPciInfo() (PciInfo, Return) { + var pci PciInfo + ret := nvmlDeviceGetPciInfo(device, &pci) + return pci, ret +} + +// nvml.DeviceGetMaxPcieLinkGeneration() +func (l *library) DeviceGetMaxPcieLinkGeneration(device Device) (int, Return) { + return device.GetMaxPcieLinkGeneration() +} + +func (device nvmlDevice) GetMaxPcieLinkGeneration() (int, Return) { + var maxLinkGen uint32 + ret := nvmlDeviceGetMaxPcieLinkGeneration(device, &maxLinkGen) + return int(maxLinkGen), ret +} + +// nvml.DeviceGetMaxPcieLinkWidth() +func (l *library) DeviceGetMaxPcieLinkWidth(device Device) (int, Return) 
{ + return device.GetMaxPcieLinkWidth() +} + +func (device nvmlDevice) GetMaxPcieLinkWidth() (int, Return) { + var maxLinkWidth uint32 + ret := nvmlDeviceGetMaxPcieLinkWidth(device, &maxLinkWidth) + return int(maxLinkWidth), ret +} + +// nvml.DeviceGetCurrPcieLinkGeneration() +func (l *library) DeviceGetCurrPcieLinkGeneration(device Device) (int, Return) { + return device.GetCurrPcieLinkGeneration() +} + +func (device nvmlDevice) GetCurrPcieLinkGeneration() (int, Return) { + var currLinkGen uint32 + ret := nvmlDeviceGetCurrPcieLinkGeneration(device, &currLinkGen) + return int(currLinkGen), ret +} + +// nvml.DeviceGetCurrPcieLinkWidth() +func (l *library) DeviceGetCurrPcieLinkWidth(device Device) (int, Return) { + return device.GetCurrPcieLinkWidth() +} + +func (device nvmlDevice) GetCurrPcieLinkWidth() (int, Return) { + var currLinkWidth uint32 + ret := nvmlDeviceGetCurrPcieLinkWidth(device, &currLinkWidth) + return int(currLinkWidth), ret +} + +// nvml.DeviceGetPcieThroughput() +func (l *library) DeviceGetPcieThroughput(device Device, counter PcieUtilCounter) (uint32, Return) { + return device.GetPcieThroughput(counter) +} + +func (device nvmlDevice) GetPcieThroughput(counter PcieUtilCounter) (uint32, Return) { + var value uint32 + ret := nvmlDeviceGetPcieThroughput(device, counter, &value) + return value, ret +} + +// nvml.DeviceGetPcieReplayCounter() +func (l *library) DeviceGetPcieReplayCounter(device Device) (int, Return) { + return device.GetPcieReplayCounter() +} + +func (device nvmlDevice) GetPcieReplayCounter() (int, Return) { + var value uint32 + ret := nvmlDeviceGetPcieReplayCounter(device, &value) + return int(value), ret +} + +// nvml.nvmlDeviceGetClockInfo() +func (l *library) DeviceGetClockInfo(device Device, clockType ClockType) (uint32, Return) { + return device.GetClockInfo(clockType) +} + +func (device nvmlDevice) GetClockInfo(clockType ClockType) (uint32, Return) { + var clock uint32 + ret := nvmlDeviceGetClockInfo(device, clockType, &clock) + return clock, ret +} + +// nvml.DeviceGetMaxClockInfo() +func (l *library) DeviceGetMaxClockInfo(device Device, clockType ClockType) (uint32, Return) { + return device.GetMaxClockInfo(clockType) +} + +func (device nvmlDevice) GetMaxClockInfo(clockType ClockType) (uint32, Return) { + var clock uint32 + ret := nvmlDeviceGetMaxClockInfo(device, clockType, &clock) + return clock, ret +} + +// nvml.DeviceGetApplicationsClock() +func (l *library) DeviceGetApplicationsClock(device Device, clockType ClockType) (uint32, Return) { + return device.GetApplicationsClock(clockType) +} + +func (device nvmlDevice) GetApplicationsClock(clockType ClockType) (uint32, Return) { + var clockMHz uint32 + ret := nvmlDeviceGetApplicationsClock(device, clockType, &clockMHz) + return clockMHz, ret +} + +// nvml.DeviceGetDefaultApplicationsClock() +func (l *library) DeviceGetDefaultApplicationsClock(device Device, clockType ClockType) (uint32, Return) { + return device.GetDefaultApplicationsClock(clockType) +} + +func (device nvmlDevice) GetDefaultApplicationsClock(clockType ClockType) (uint32, Return) { + var clockMHz uint32 + ret := nvmlDeviceGetDefaultApplicationsClock(device, clockType, &clockMHz) + return clockMHz, ret +} + +// nvml.DeviceResetApplicationsClocks() +func (l *library) DeviceResetApplicationsClocks(device Device) Return { + return device.ResetApplicationsClocks() +} + +func (device nvmlDevice) ResetApplicationsClocks() Return { + return nvmlDeviceResetApplicationsClocks(device) +} + +// nvml.DeviceGetClock() +func (l *library) 
DeviceGetClock(device Device, clockType ClockType, clockId ClockId) (uint32, Return) { + return device.GetClock(clockType, clockId) +} + +func (device nvmlDevice) GetClock(clockType ClockType, clockId ClockId) (uint32, Return) { + var clockMHz uint32 + ret := nvmlDeviceGetClock(device, clockType, clockId, &clockMHz) + return clockMHz, ret +} + +// nvml.DeviceGetMaxCustomerBoostClock() +func (l *library) DeviceGetMaxCustomerBoostClock(device Device, clockType ClockType) (uint32, Return) { + return device.GetMaxCustomerBoostClock(clockType) +} + +func (device nvmlDevice) GetMaxCustomerBoostClock(clockType ClockType) (uint32, Return) { + var clockMHz uint32 + ret := nvmlDeviceGetMaxCustomerBoostClock(device, clockType, &clockMHz) + return clockMHz, ret +} + +// nvml.DeviceGetSupportedMemoryClocks() +func (l *library) DeviceGetSupportedMemoryClocks(device Device) (int, uint32, Return) { + return device.GetSupportedMemoryClocks() +} + +func (device nvmlDevice) GetSupportedMemoryClocks() (int, uint32, Return) { + var count, clocksMHz uint32 + ret := nvmlDeviceGetSupportedMemoryClocks(device, &count, &clocksMHz) + return int(count), clocksMHz, ret +} + +// nvml.DeviceGetSupportedGraphicsClocks() +func (l *library) DeviceGetSupportedGraphicsClocks(device Device, memoryClockMHz int) (int, uint32, Return) { + return device.GetSupportedGraphicsClocks(memoryClockMHz) +} + +func (device nvmlDevice) GetSupportedGraphicsClocks(memoryClockMHz int) (int, uint32, Return) { + var count, clocksMHz uint32 + ret := nvmlDeviceGetSupportedGraphicsClocks(device, uint32(memoryClockMHz), &count, &clocksMHz) + return int(count), clocksMHz, ret +} + +// nvml.DeviceGetAutoBoostedClocksEnabled() +func (l *library) DeviceGetAutoBoostedClocksEnabled(device Device) (EnableState, EnableState, Return) { + return device.GetAutoBoostedClocksEnabled() +} + +func (device nvmlDevice) GetAutoBoostedClocksEnabled() (EnableState, EnableState, Return) { + var isEnabled, defaultIsEnabled EnableState + ret := nvmlDeviceGetAutoBoostedClocksEnabled(device, &isEnabled, &defaultIsEnabled) + return isEnabled, defaultIsEnabled, ret +} + +// nvml.DeviceSetAutoBoostedClocksEnabled() +func (l *library) DeviceSetAutoBoostedClocksEnabled(device Device, enabled EnableState) Return { + return device.SetAutoBoostedClocksEnabled(enabled) +} + +func (device nvmlDevice) SetAutoBoostedClocksEnabled(enabled EnableState) Return { + return nvmlDeviceSetAutoBoostedClocksEnabled(device, enabled) +} + +// nvml.DeviceSetDefaultAutoBoostedClocksEnabled() +func (l *library) DeviceSetDefaultAutoBoostedClocksEnabled(device Device, enabled EnableState, flags uint32) Return { + return device.SetDefaultAutoBoostedClocksEnabled(enabled, flags) +} + +func (device nvmlDevice) SetDefaultAutoBoostedClocksEnabled(enabled EnableState, flags uint32) Return { + return nvmlDeviceSetDefaultAutoBoostedClocksEnabled(device, enabled, flags) +} + +// nvml.DeviceGetFanSpeed() +func (l *library) DeviceGetFanSpeed(device Device) (uint32, Return) { + return device.GetFanSpeed() +} + +func (device nvmlDevice) GetFanSpeed() (uint32, Return) { + var speed uint32 + ret := nvmlDeviceGetFanSpeed(device, &speed) + return speed, ret +} + +// nvml.DeviceGetFanSpeed_v2() +func (l *library) DeviceGetFanSpeed_v2(device Device, fan int) (uint32, Return) { + return device.GetFanSpeed_v2(fan) +} + +func (device nvmlDevice) GetFanSpeed_v2(fan int) (uint32, Return) { + var speed uint32 + ret := nvmlDeviceGetFanSpeed_v2(device, uint32(fan), &speed) + return speed, ret +} + +// nvml.DeviceGetNumFans() 
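// The getters above all share one calling convention: the library-level wrapper
// (DeviceGetClockInfo, DeviceGetFanSpeed, ...) delegates to the corresponding
// Device method, which passes a Go variable as the C out-parameter and returns
// it together with the Return code. A hypothetical helper illustrating that
// convention; it assumes the log import and an nvml.Device obtained as in the
// sketch after const.go above:

func reportClocks(device nvml.Device) {
	if sm, ret := device.GetClockInfo(nvml.CLOCK_SM); ret == nvml.SUCCESS {
		log.Printf("SM clock: %d MHz", sm)
	}
	if mem, ret := device.GetClockInfo(nvml.CLOCK_MEM); ret == nvml.SUCCESS {
		log.Printf("memory clock: %d MHz", mem)
	}
	if speed, ret := device.GetFanSpeed(); ret == nvml.SUCCESS {
		log.Printf("fan speed: %d%%", speed)
	}
}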
+func (l *library) DeviceGetNumFans(device Device) (int, Return) { + return device.GetNumFans() +} + +func (device nvmlDevice) GetNumFans() (int, Return) { + var numFans uint32 + ret := nvmlDeviceGetNumFans(device, &numFans) + return int(numFans), ret +} + +// nvml.DeviceGetTemperature() +func (l *library) DeviceGetTemperature(device Device, sensorType TemperatureSensors) (uint32, Return) { + return device.GetTemperature(sensorType) +} + +func (device nvmlDevice) GetTemperature(sensorType TemperatureSensors) (uint32, Return) { + var temp uint32 + ret := nvmlDeviceGetTemperature(device, sensorType, &temp) + return temp, ret +} + +// nvml.DeviceGetTemperatureThreshold() +func (l *library) DeviceGetTemperatureThreshold(device Device, thresholdType TemperatureThresholds) (uint32, Return) { + return device.GetTemperatureThreshold(thresholdType) +} + +func (device nvmlDevice) GetTemperatureThreshold(thresholdType TemperatureThresholds) (uint32, Return) { + var temp uint32 + ret := nvmlDeviceGetTemperatureThreshold(device, thresholdType, &temp) + return temp, ret +} + +// nvml.DeviceSetTemperatureThreshold() +func (l *library) DeviceSetTemperatureThreshold(device Device, thresholdType TemperatureThresholds, temp int) Return { + return device.SetTemperatureThreshold(thresholdType, temp) +} + +func (device nvmlDevice) SetTemperatureThreshold(thresholdType TemperatureThresholds, temp int) Return { + t := int32(temp) + ret := nvmlDeviceSetTemperatureThreshold(device, thresholdType, &t) + return ret +} + +// nvml.DeviceGetPerformanceState() +func (l *library) DeviceGetPerformanceState(device Device) (Pstates, Return) { + return device.GetPerformanceState() +} + +func (device nvmlDevice) GetPerformanceState() (Pstates, Return) { + var pState Pstates + ret := nvmlDeviceGetPerformanceState(device, &pState) + return pState, ret +} + +// nvml.DeviceGetCurrentClocksThrottleReasons() +func (l *library) DeviceGetCurrentClocksThrottleReasons(device Device) (uint64, Return) { + return device.GetCurrentClocksThrottleReasons() +} + +func (device nvmlDevice) GetCurrentClocksThrottleReasons() (uint64, Return) { + var clocksThrottleReasons uint64 + ret := nvmlDeviceGetCurrentClocksThrottleReasons(device, &clocksThrottleReasons) + return clocksThrottleReasons, ret +} + +// nvml.DeviceGetSupportedClocksThrottleReasons() +func (l *library) DeviceGetSupportedClocksThrottleReasons(device Device) (uint64, Return) { + return device.GetSupportedClocksThrottleReasons() +} + +func (device nvmlDevice) GetSupportedClocksThrottleReasons() (uint64, Return) { + var supportedClocksThrottleReasons uint64 + ret := nvmlDeviceGetSupportedClocksThrottleReasons(device, &supportedClocksThrottleReasons) + return supportedClocksThrottleReasons, ret +} + +// nvml.DeviceGetPowerState() +func (l *library) DeviceGetPowerState(device Device) (Pstates, Return) { + return device.GetPowerState() +} + +func (device nvmlDevice) GetPowerState() (Pstates, Return) { + var pState Pstates + ret := nvmlDeviceGetPowerState(device, &pState) + return pState, ret +} + +// nvml.DeviceGetPowerManagementMode() +func (l *library) DeviceGetPowerManagementMode(device Device) (EnableState, Return) { + return device.GetPowerManagementMode() +} + +func (device nvmlDevice) GetPowerManagementMode() (EnableState, Return) { + var mode EnableState + ret := nvmlDeviceGetPowerManagementMode(device, &mode) + return mode, ret +} + +// nvml.DeviceGetPowerManagementLimit() +func (l *library) DeviceGetPowerManagementLimit(device Device) (uint32, Return) { + return 
device.GetPowerManagementLimit() +} + +func (device nvmlDevice) GetPowerManagementLimit() (uint32, Return) { + var limit uint32 + ret := nvmlDeviceGetPowerManagementLimit(device, &limit) + return limit, ret +} + +// nvml.DeviceGetPowerManagementLimitConstraints() +func (l *library) DeviceGetPowerManagementLimitConstraints(device Device) (uint32, uint32, Return) { + return device.GetPowerManagementLimitConstraints() +} + +func (device nvmlDevice) GetPowerManagementLimitConstraints() (uint32, uint32, Return) { + var minLimit, maxLimit uint32 + ret := nvmlDeviceGetPowerManagementLimitConstraints(device, &minLimit, &maxLimit) + return minLimit, maxLimit, ret +} + +// nvml.DeviceGetPowerManagementDefaultLimit() +func (l *library) DeviceGetPowerManagementDefaultLimit(device Device) (uint32, Return) { + return device.GetPowerManagementDefaultLimit() +} + +func (device nvmlDevice) GetPowerManagementDefaultLimit() (uint32, Return) { + var defaultLimit uint32 + ret := nvmlDeviceGetPowerManagementDefaultLimit(device, &defaultLimit) + return defaultLimit, ret +} + +// nvml.DeviceGetPowerUsage() +func (l *library) DeviceGetPowerUsage(device Device) (uint32, Return) { + return device.GetPowerUsage() +} + +func (device nvmlDevice) GetPowerUsage() (uint32, Return) { + var power uint32 + ret := nvmlDeviceGetPowerUsage(device, &power) + return power, ret +} + +// nvml.DeviceGetTotalEnergyConsumption() +func (l *library) DeviceGetTotalEnergyConsumption(device Device) (uint64, Return) { + return device.GetTotalEnergyConsumption() +} + +func (device nvmlDevice) GetTotalEnergyConsumption() (uint64, Return) { + var energy uint64 + ret := nvmlDeviceGetTotalEnergyConsumption(device, &energy) + return energy, ret +} + +// nvml.DeviceGetEnforcedPowerLimit() +func (l *library) DeviceGetEnforcedPowerLimit(device Device) (uint32, Return) { + return device.GetEnforcedPowerLimit() +} + +func (device nvmlDevice) GetEnforcedPowerLimit() (uint32, Return) { + var limit uint32 + ret := nvmlDeviceGetEnforcedPowerLimit(device, &limit) + return limit, ret +} + +// nvml.DeviceGetGpuOperationMode() +func (l *library) DeviceGetGpuOperationMode(device Device) (GpuOperationMode, GpuOperationMode, Return) { + return device.GetGpuOperationMode() +} + +func (device nvmlDevice) GetGpuOperationMode() (GpuOperationMode, GpuOperationMode, Return) { + var current, pending GpuOperationMode + ret := nvmlDeviceGetGpuOperationMode(device, ¤t, &pending) + return current, pending, ret +} + +// nvml.DeviceGetMemoryInfo() +func (l *library) DeviceGetMemoryInfo(device Device) (Memory, Return) { + return device.GetMemoryInfo() +} + +func (device nvmlDevice) GetMemoryInfo() (Memory, Return) { + var memory Memory + ret := nvmlDeviceGetMemoryInfo(device, &memory) + return memory, ret +} + +// nvml.DeviceGetMemoryInfo_v2() +func (l *library) DeviceGetMemoryInfo_v2(device Device) (Memory_v2, Return) { + return device.GetMemoryInfo_v2() +} + +func (device nvmlDevice) GetMemoryInfo_v2() (Memory_v2, Return) { + var memory Memory_v2 + memory.Version = STRUCT_VERSION(memory, 2) + ret := nvmlDeviceGetMemoryInfo_v2(device, &memory) + return memory, ret +} + +// nvml.DeviceGetComputeMode() +func (l *library) DeviceGetComputeMode(device Device) (ComputeMode, Return) { + return device.GetComputeMode() +} + +func (device nvmlDevice) GetComputeMode() (ComputeMode, Return) { + var mode ComputeMode + ret := nvmlDeviceGetComputeMode(device, &mode) + return mode, ret +} + +// nvml.DeviceGetCudaComputeCapability() +func (l *library) DeviceGetCudaComputeCapability(device 
Device) (int, int, Return) { + return device.GetCudaComputeCapability() +} + +func (device nvmlDevice) GetCudaComputeCapability() (int, int, Return) { + var major, minor int32 + ret := nvmlDeviceGetCudaComputeCapability(device, &major, &minor) + return int(major), int(minor), ret +} + +// nvml.DeviceGetEccMode() +func (l *library) DeviceGetEccMode(device Device) (EnableState, EnableState, Return) { + return device.GetEccMode() +} + +func (device nvmlDevice) GetEccMode() (EnableState, EnableState, Return) { + var current, pending EnableState + ret := nvmlDeviceGetEccMode(device, ¤t, &pending) + return current, pending, ret +} + +// nvml.DeviceGetBoardId() +func (l *library) DeviceGetBoardId(device Device) (uint32, Return) { + return device.GetBoardId() +} + +func (device nvmlDevice) GetBoardId() (uint32, Return) { + var boardId uint32 + ret := nvmlDeviceGetBoardId(device, &boardId) + return boardId, ret +} + +// nvml.DeviceGetMultiGpuBoard() +func (l *library) DeviceGetMultiGpuBoard(device Device) (int, Return) { + return device.GetMultiGpuBoard() +} + +func (device nvmlDevice) GetMultiGpuBoard() (int, Return) { + var multiGpuBool uint32 + ret := nvmlDeviceGetMultiGpuBoard(device, &multiGpuBool) + return int(multiGpuBool), ret +} + +// nvml.DeviceGetTotalEccErrors() +func (l *library) DeviceGetTotalEccErrors(device Device, errorType MemoryErrorType, counterType EccCounterType) (uint64, Return) { + return device.GetTotalEccErrors(errorType, counterType) +} + +func (device nvmlDevice) GetTotalEccErrors(errorType MemoryErrorType, counterType EccCounterType) (uint64, Return) { + var eccCounts uint64 + ret := nvmlDeviceGetTotalEccErrors(device, errorType, counterType, &eccCounts) + return eccCounts, ret +} + +// nvml.DeviceGetDetailedEccErrors() +func (l *library) DeviceGetDetailedEccErrors(device Device, errorType MemoryErrorType, counterType EccCounterType) (EccErrorCounts, Return) { + return device.GetDetailedEccErrors(errorType, counterType) +} + +func (device nvmlDevice) GetDetailedEccErrors(errorType MemoryErrorType, counterType EccCounterType) (EccErrorCounts, Return) { + var eccCounts EccErrorCounts + ret := nvmlDeviceGetDetailedEccErrors(device, errorType, counterType, &eccCounts) + return eccCounts, ret +} + +// nvml.DeviceGetMemoryErrorCounter() +func (l *library) DeviceGetMemoryErrorCounter(device Device, errorType MemoryErrorType, counterType EccCounterType, locationType MemoryLocation) (uint64, Return) { + return device.GetMemoryErrorCounter(errorType, counterType, locationType) +} + +func (device nvmlDevice) GetMemoryErrorCounter(errorType MemoryErrorType, counterType EccCounterType, locationType MemoryLocation) (uint64, Return) { + var count uint64 + ret := nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, &count) + return count, ret +} + +// nvml.DeviceGetUtilizationRates() +func (l *library) DeviceGetUtilizationRates(device Device) (Utilization, Return) { + return device.GetUtilizationRates() +} + +func (device nvmlDevice) GetUtilizationRates() (Utilization, Return) { + var utilization Utilization + ret := nvmlDeviceGetUtilizationRates(device, &utilization) + return utilization, ret +} + +// nvml.DeviceGetEncoderUtilization() +func (l *library) DeviceGetEncoderUtilization(device Device) (uint32, uint32, Return) { + return device.GetEncoderUtilization() +} + +func (device nvmlDevice) GetEncoderUtilization() (uint32, uint32, Return) { + var utilization, samplingPeriodUs uint32 + ret := nvmlDeviceGetEncoderUtilization(device, &utilization, 
&samplingPeriodUs) + return utilization, samplingPeriodUs, ret +} + +// nvml.DeviceGetEncoderCapacity() +func (l *library) DeviceGetEncoderCapacity(device Device, encoderQueryType EncoderType) (int, Return) { + return device.GetEncoderCapacity(encoderQueryType) +} + +func (device nvmlDevice) GetEncoderCapacity(encoderQueryType EncoderType) (int, Return) { + var encoderCapacity uint32 + ret := nvmlDeviceGetEncoderCapacity(device, encoderQueryType, &encoderCapacity) + return int(encoderCapacity), ret +} + +// nvml.DeviceGetEncoderStats() +func (l *library) DeviceGetEncoderStats(device Device) (int, uint32, uint32, Return) { + return device.GetEncoderStats() +} + +func (device nvmlDevice) GetEncoderStats() (int, uint32, uint32, Return) { + var sessionCount, averageFps, averageLatency uint32 + ret := nvmlDeviceGetEncoderStats(device, &sessionCount, &averageFps, &averageLatency) + return int(sessionCount), averageFps, averageLatency, ret +} + +// nvml.DeviceGetEncoderSessions() +func (l *library) DeviceGetEncoderSessions(device Device) ([]EncoderSessionInfo, Return) { + return device.GetEncoderSessions() +} + +func (device nvmlDevice) GetEncoderSessions() ([]EncoderSessionInfo, Return) { + var sessionCount uint32 = 1 // Will be reduced upon returning + for { + sessionInfos := make([]EncoderSessionInfo, sessionCount) + ret := nvmlDeviceGetEncoderSessions(device, &sessionCount, &sessionInfos[0]) + if ret == SUCCESS { + return sessionInfos[:sessionCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + sessionCount *= 2 + } +} + +// nvml.DeviceGetDecoderUtilization() +func (l *library) DeviceGetDecoderUtilization(device Device) (uint32, uint32, Return) { + return device.GetDecoderUtilization() +} + +func (device nvmlDevice) GetDecoderUtilization() (uint32, uint32, Return) { + var utilization, samplingPeriodUs uint32 + ret := nvmlDeviceGetDecoderUtilization(device, &utilization, &samplingPeriodUs) + return utilization, samplingPeriodUs, ret +} + +// nvml.DeviceGetFBCStats() +func (l *library) DeviceGetFBCStats(device Device) (FBCStats, Return) { + return device.GetFBCStats() +} + +func (device nvmlDevice) GetFBCStats() (FBCStats, Return) { + var fbcStats FBCStats + ret := nvmlDeviceGetFBCStats(device, &fbcStats) + return fbcStats, ret +} + +// nvml.DeviceGetFBCSessions() +func (l *library) DeviceGetFBCSessions(device Device) ([]FBCSessionInfo, Return) { + return device.GetFBCSessions() +} + +func (device nvmlDevice) GetFBCSessions() ([]FBCSessionInfo, Return) { + var sessionCount uint32 = 1 // Will be reduced upon returning + for { + sessionInfo := make([]FBCSessionInfo, sessionCount) + ret := nvmlDeviceGetFBCSessions(device, &sessionCount, &sessionInfo[0]) + if ret == SUCCESS { + return sessionInfo[:sessionCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + sessionCount *= 2 + } +} + +// nvml.DeviceGetDriverModel() +func (l *library) DeviceGetDriverModel(device Device) (DriverModel, DriverModel, Return) { + return device.GetDriverModel() +} + +func (device nvmlDevice) GetDriverModel() (DriverModel, DriverModel, Return) { + var current, pending DriverModel + ret := nvmlDeviceGetDriverModel(device, ¤t, &pending) + return current, pending, ret +} + +// nvml.DeviceGetVbiosVersion() +func (l *library) DeviceGetVbiosVersion(device Device) (string, Return) { + return device.GetVbiosVersion() +} + +func (device nvmlDevice) GetVbiosVersion() (string, Return) { + version := make([]byte, DEVICE_VBIOS_VERSION_BUFFER_SIZE) + ret := 
nvmlDeviceGetVbiosVersion(device, &version[0], DEVICE_VBIOS_VERSION_BUFFER_SIZE) + return string(version[:clen(version)]), ret +} + +// nvml.DeviceGetBridgeChipInfo() +func (l *library) DeviceGetBridgeChipInfo(device Device) (BridgeChipHierarchy, Return) { + return device.GetBridgeChipInfo() +} + +func (device nvmlDevice) GetBridgeChipInfo() (BridgeChipHierarchy, Return) { + var bridgeHierarchy BridgeChipHierarchy + ret := nvmlDeviceGetBridgeChipInfo(device, &bridgeHierarchy) + return bridgeHierarchy, ret +} + +// nvml.DeviceGetComputeRunningProcesses() +func deviceGetComputeRunningProcesses_v1(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo_v1, infoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v1(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(infos[:infoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + infoCount *= 2 + } +} + +func deviceGetComputeRunningProcesses_v2(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo_v2, infoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v2(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(infos[:infoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + infoCount *= 2 + } +} + +func deviceGetComputeRunningProcesses_v3(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo, infoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v3(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return infos[:infoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + infoCount *= 2 + } +} + +func (l *library) DeviceGetComputeRunningProcesses(device Device) ([]ProcessInfo, Return) { + return device.GetComputeRunningProcesses() +} + +func (device nvmlDevice) GetComputeRunningProcesses() ([]ProcessInfo, Return) { + return deviceGetComputeRunningProcesses(device) +} + +// nvml.DeviceGetGraphicsRunningProcesses() +func deviceGetGraphicsRunningProcesses_v1(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo_v1, infoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v1(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(infos[:infoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + infoCount *= 2 + } +} + +func deviceGetGraphicsRunningProcesses_v2(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo_v2, infoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v2(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(infos[:infoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + infoCount *= 2 + } +} + +func deviceGetGraphicsRunningProcesses_v3(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo, infoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v3(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return infos[:infoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { 
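			// Any status other than ERROR_INSUFFICIENT_SIZE is terminal, so the
			// helper gives up here; an undersized buffer instead falls through to
			// the doubling below and the call is retried with a larger slice.
			// The same grow-and-retry idiom is used by every variable-length query
			// in this file (encoder and FBC sessions, accounting PIDs, retired
			// pages, and the v1/v2/v3 running-process lists).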
+ return nil, ret + } + infoCount *= 2 + } +} + +func (l *library) DeviceGetGraphicsRunningProcesses(device Device) ([]ProcessInfo, Return) { + return device.GetGraphicsRunningProcesses() +} + +func (device nvmlDevice) GetGraphicsRunningProcesses() ([]ProcessInfo, Return) { + return deviceGetGraphicsRunningProcesses(device) +} + +// nvml.DeviceGetMPSComputeRunningProcesses() +func deviceGetMPSComputeRunningProcesses_v1(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo_v1, infoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v1(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(infos[:infoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + infoCount *= 2 + } +} + +func deviceGetMPSComputeRunningProcesses_v2(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo_v2, infoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v2(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(infos[:infoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + infoCount *= 2 + } +} + +func deviceGetMPSComputeRunningProcesses_v3(device nvmlDevice) ([]ProcessInfo, Return) { + var infoCount uint32 = 1 // Will be reduced upon returning + for { + infos := make([]ProcessInfo, infoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v3(device, &infoCount, &infos[0]) + if ret == SUCCESS { + return infos[:infoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + infoCount *= 2 + } +} + +func (l *library) DeviceGetMPSComputeRunningProcesses(device Device) ([]ProcessInfo, Return) { + return device.GetMPSComputeRunningProcesses() +} + +func (device nvmlDevice) GetMPSComputeRunningProcesses() ([]ProcessInfo, Return) { + return deviceGetMPSComputeRunningProcesses(device) +} + +// nvml.DeviceOnSameBoard() +func (l *library) DeviceOnSameBoard(device1 Device, device2 Device) (int, Return) { + return device1.OnSameBoard(device2) +} + +func (device1 nvmlDevice) OnSameBoard(device2 Device) (int, Return) { + var onSameBoard int32 + ret := nvmlDeviceOnSameBoard(device1, nvmlDeviceHandle(device2), &onSameBoard) + return int(onSameBoard), ret +} + +// nvml.DeviceGetAPIRestriction() +func (l *library) DeviceGetAPIRestriction(device Device, apiType RestrictedAPI) (EnableState, Return) { + return device.GetAPIRestriction(apiType) +} + +func (device nvmlDevice) GetAPIRestriction(apiType RestrictedAPI) (EnableState, Return) { + var isRestricted EnableState + ret := nvmlDeviceGetAPIRestriction(device, apiType, &isRestricted) + return isRestricted, ret +} + +// nvml.DeviceGetSamples() +func (l *library) DeviceGetSamples(device Device, samplingType SamplingType, lastSeenTimestamp uint64) (ValueType, []Sample, Return) { + return device.GetSamples(samplingType, lastSeenTimestamp) +} + +func (device nvmlDevice) GetSamples(samplingType SamplingType, lastSeenTimestamp uint64) (ValueType, []Sample, Return) { + var sampleValType ValueType + var sampleCount uint32 + ret := nvmlDeviceGetSamples(device, samplingType, lastSeenTimestamp, &sampleValType, &sampleCount, nil) + if ret != SUCCESS { + return sampleValType, nil, ret + } + if sampleCount == 0 { + return sampleValType, []Sample{}, ret + } + samples := make([]Sample, sampleCount) + ret = nvmlDeviceGetSamples(device, samplingType, 
lastSeenTimestamp, &sampleValType, &sampleCount, &samples[0]) + return sampleValType, samples, ret +} + +// nvml.DeviceGetBAR1MemoryInfo() +func (l *library) DeviceGetBAR1MemoryInfo(device Device) (BAR1Memory, Return) { + return device.GetBAR1MemoryInfo() +} + +func (device nvmlDevice) GetBAR1MemoryInfo() (BAR1Memory, Return) { + var bar1Memory BAR1Memory + ret := nvmlDeviceGetBAR1MemoryInfo(device, &bar1Memory) + return bar1Memory, ret +} + +// nvml.DeviceGetViolationStatus() +func (l *library) DeviceGetViolationStatus(device Device, perfPolicyType PerfPolicyType) (ViolationTime, Return) { + return device.GetViolationStatus(perfPolicyType) +} + +func (device nvmlDevice) GetViolationStatus(perfPolicyType PerfPolicyType) (ViolationTime, Return) { + var violTime ViolationTime + ret := nvmlDeviceGetViolationStatus(device, perfPolicyType, &violTime) + return violTime, ret +} + +// nvml.DeviceGetIrqNum() +func (l *library) DeviceGetIrqNum(device Device) (int, Return) { + return device.GetIrqNum() +} + +func (device nvmlDevice) GetIrqNum() (int, Return) { + var irqNum uint32 + ret := nvmlDeviceGetIrqNum(device, &irqNum) + return int(irqNum), ret +} + +// nvml.DeviceGetNumGpuCores() +func (l *library) DeviceGetNumGpuCores(device Device) (int, Return) { + return device.GetNumGpuCores() +} + +func (device nvmlDevice) GetNumGpuCores() (int, Return) { + var numCores uint32 + ret := nvmlDeviceGetNumGpuCores(device, &numCores) + return int(numCores), ret +} + +// nvml.DeviceGetPowerSource() +func (l *library) DeviceGetPowerSource(device Device) (PowerSource, Return) { + return device.GetPowerSource() +} + +func (device nvmlDevice) GetPowerSource() (PowerSource, Return) { + var powerSource PowerSource + ret := nvmlDeviceGetPowerSource(device, &powerSource) + return powerSource, ret +} + +// nvml.DeviceGetMemoryBusWidth() +func (l *library) DeviceGetMemoryBusWidth(device Device) (uint32, Return) { + return device.GetMemoryBusWidth() +} + +func (device nvmlDevice) GetMemoryBusWidth() (uint32, Return) { + var busWidth uint32 + ret := nvmlDeviceGetMemoryBusWidth(device, &busWidth) + return busWidth, ret +} + +// nvml.DeviceGetPcieLinkMaxSpeed() +func (l *library) DeviceGetPcieLinkMaxSpeed(device Device) (uint32, Return) { + return device.GetPcieLinkMaxSpeed() +} + +func (device nvmlDevice) GetPcieLinkMaxSpeed() (uint32, Return) { + var maxSpeed uint32 + ret := nvmlDeviceGetPcieLinkMaxSpeed(device, &maxSpeed) + return maxSpeed, ret +} + +// nvml.DeviceGetAdaptiveClockInfoStatus() +func (l *library) DeviceGetAdaptiveClockInfoStatus(device Device) (uint32, Return) { + return device.GetAdaptiveClockInfoStatus() +} + +func (device nvmlDevice) GetAdaptiveClockInfoStatus() (uint32, Return) { + var adaptiveClockStatus uint32 + ret := nvmlDeviceGetAdaptiveClockInfoStatus(device, &adaptiveClockStatus) + return adaptiveClockStatus, ret +} + +// nvml.DeviceGetAccountingMode() +func (l *library) DeviceGetAccountingMode(device Device) (EnableState, Return) { + return device.GetAccountingMode() +} + +func (device nvmlDevice) GetAccountingMode() (EnableState, Return) { + var mode EnableState + ret := nvmlDeviceGetAccountingMode(device, &mode) + return mode, ret +} + +// nvml.DeviceGetAccountingStats() +func (l *library) DeviceGetAccountingStats(device Device, pid uint32) (AccountingStats, Return) { + return device.GetAccountingStats(pid) +} + +func (device nvmlDevice) GetAccountingStats(pid uint32) (AccountingStats, Return) { + var stats AccountingStats + ret := nvmlDeviceGetAccountingStats(device, pid, &stats) + return 
stats, ret +} + +// nvml.DeviceGetAccountingPids() +func (l *library) DeviceGetAccountingPids(device Device) ([]int, Return) { + return device.GetAccountingPids() +} + +func (device nvmlDevice) GetAccountingPids() ([]int, Return) { + var count uint32 = 1 // Will be reduced upon returning + for { + pids := make([]uint32, count) + ret := nvmlDeviceGetAccountingPids(device, &count, &pids[0]) + if ret == SUCCESS { + return uint32SliceToIntSlice(pids[:count]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + count *= 2 + } +} + +// nvml.DeviceGetAccountingBufferSize() +func (l *library) DeviceGetAccountingBufferSize(device Device) (int, Return) { + return device.GetAccountingBufferSize() +} + +func (device nvmlDevice) GetAccountingBufferSize() (int, Return) { + var bufferSize uint32 + ret := nvmlDeviceGetAccountingBufferSize(device, &bufferSize) + return int(bufferSize), ret +} + +// nvml.DeviceGetRetiredPages() +func (l *library) DeviceGetRetiredPages(device Device, cause PageRetirementCause) ([]uint64, Return) { + return device.GetRetiredPages(cause) +} + +func (device nvmlDevice) GetRetiredPages(cause PageRetirementCause) ([]uint64, Return) { + var pageCount uint32 = 1 // Will be reduced upon returning + for { + addresses := make([]uint64, pageCount) + ret := nvmlDeviceGetRetiredPages(device, cause, &pageCount, &addresses[0]) + if ret == SUCCESS { + return addresses[:pageCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + pageCount *= 2 + } +} + +// nvml.DeviceGetRetiredPages_v2() +func (l *library) DeviceGetRetiredPages_v2(device Device, cause PageRetirementCause) ([]uint64, []uint64, Return) { + return device.GetRetiredPages_v2(cause) +} + +func (device nvmlDevice) GetRetiredPages_v2(cause PageRetirementCause) ([]uint64, []uint64, Return) { + var pageCount uint32 = 1 // Will be reduced upon returning + for { + addresses := make([]uint64, pageCount) + timestamps := make([]uint64, pageCount) + ret := nvmlDeviceGetRetiredPages_v2(device, cause, &pageCount, &addresses[0], &timestamps[0]) + if ret == SUCCESS { + return addresses[:pageCount], timestamps[:pageCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, nil, ret + } + pageCount *= 2 + } +} + +// nvml.DeviceGetRetiredPagesPendingStatus() +func (l *library) DeviceGetRetiredPagesPendingStatus(device Device) (EnableState, Return) { + return device.GetRetiredPagesPendingStatus() +} + +func (device nvmlDevice) GetRetiredPagesPendingStatus() (EnableState, Return) { + var isPending EnableState + ret := nvmlDeviceGetRetiredPagesPendingStatus(device, &isPending) + return isPending, ret +} + +// nvml.DeviceSetPersistenceMode() +func (l *library) DeviceSetPersistenceMode(device Device, mode EnableState) Return { + return device.SetPersistenceMode(mode) +} + +func (device nvmlDevice) SetPersistenceMode(mode EnableState) Return { + return nvmlDeviceSetPersistenceMode(device, mode) +} + +// nvml.DeviceSetComputeMode() +func (l *library) DeviceSetComputeMode(device Device, mode ComputeMode) Return { + return device.SetComputeMode(mode) +} + +func (device nvmlDevice) SetComputeMode(mode ComputeMode) Return { + return nvmlDeviceSetComputeMode(device, mode) +} + +// nvml.DeviceSetEccMode() +func (l *library) DeviceSetEccMode(device Device, ecc EnableState) Return { + return device.SetEccMode(ecc) +} + +func (device nvmlDevice) SetEccMode(ecc EnableState) Return { + return nvmlDeviceSetEccMode(device, ecc) +} + +// nvml.DeviceClearEccErrorCounts() +func (l *library) DeviceClearEccErrorCounts(device 
Device, counterType EccCounterType) Return { + return device.ClearEccErrorCounts(counterType) +} + +func (device nvmlDevice) ClearEccErrorCounts(counterType EccCounterType) Return { + return nvmlDeviceClearEccErrorCounts(device, counterType) +} + +// nvml.DeviceSetDriverModel() +func (l *library) DeviceSetDriverModel(device Device, driverModel DriverModel, flags uint32) Return { + return device.SetDriverModel(driverModel, flags) +} + +func (device nvmlDevice) SetDriverModel(driverModel DriverModel, flags uint32) Return { + return nvmlDeviceSetDriverModel(device, driverModel, flags) +} + +// nvml.DeviceSetGpuLockedClocks() +func (l *library) DeviceSetGpuLockedClocks(device Device, minGpuClockMHz uint32, maxGpuClockMHz uint32) Return { + return device.SetGpuLockedClocks(minGpuClockMHz, maxGpuClockMHz) +} + +func (device nvmlDevice) SetGpuLockedClocks(minGpuClockMHz uint32, maxGpuClockMHz uint32) Return { + return nvmlDeviceSetGpuLockedClocks(device, minGpuClockMHz, maxGpuClockMHz) +} + +// nvml.DeviceResetGpuLockedClocks() +func (l *library) DeviceResetGpuLockedClocks(device Device) Return { + return device.ResetGpuLockedClocks() +} + +func (device nvmlDevice) ResetGpuLockedClocks() Return { + return nvmlDeviceResetGpuLockedClocks(device) +} + +// nvmlDeviceSetMemoryLockedClocks() +func (l *library) DeviceSetMemoryLockedClocks(device Device, minMemClockMHz uint32, maxMemClockMHz uint32) Return { + return device.SetMemoryLockedClocks(minMemClockMHz, maxMemClockMHz) +} + +func (device nvmlDevice) SetMemoryLockedClocks(minMemClockMHz uint32, maxMemClockMHz uint32) Return { + return nvmlDeviceSetMemoryLockedClocks(device, minMemClockMHz, maxMemClockMHz) +} + +// nvmlDeviceResetMemoryLockedClocks() +func (l *library) DeviceResetMemoryLockedClocks(device Device) Return { + return device.ResetMemoryLockedClocks() +} + +func (device nvmlDevice) ResetMemoryLockedClocks() Return { + return nvmlDeviceResetMemoryLockedClocks(device) +} + +// nvml.DeviceGetClkMonStatus() +func (l *library) DeviceGetClkMonStatus(device Device) (ClkMonStatus, Return) { + return device.GetClkMonStatus() +} + +func (device nvmlDevice) GetClkMonStatus() (ClkMonStatus, Return) { + var status ClkMonStatus + ret := nvmlDeviceGetClkMonStatus(device, &status) + return status, ret +} + +// nvml.DeviceSetApplicationsClocks() +func (l *library) DeviceSetApplicationsClocks(device Device, memClockMHz uint32, graphicsClockMHz uint32) Return { + return device.SetApplicationsClocks(memClockMHz, graphicsClockMHz) +} + +func (device nvmlDevice) SetApplicationsClocks(memClockMHz uint32, graphicsClockMHz uint32) Return { + return nvmlDeviceSetApplicationsClocks(device, memClockMHz, graphicsClockMHz) +} + +// nvml.DeviceSetPowerManagementLimit() +func (l *library) DeviceSetPowerManagementLimit(device Device, limit uint32) Return { + return device.SetPowerManagementLimit(limit) +} + +func (device nvmlDevice) SetPowerManagementLimit(limit uint32) Return { + return nvmlDeviceSetPowerManagementLimit(device, limit) +} + +// nvml.DeviceSetGpuOperationMode() +func (l *library) DeviceSetGpuOperationMode(device Device, mode GpuOperationMode) Return { + return device.SetGpuOperationMode(mode) +} + +func (device nvmlDevice) SetGpuOperationMode(mode GpuOperationMode) Return { + return nvmlDeviceSetGpuOperationMode(device, mode) +} + +// nvml.DeviceSetAPIRestriction() +func (l *library) DeviceSetAPIRestriction(device Device, apiType RestrictedAPI, isRestricted EnableState) Return { + return device.SetAPIRestriction(apiType, isRestricted) +} + +func (device 
nvmlDevice) SetAPIRestriction(apiType RestrictedAPI, isRestricted EnableState) Return { + return nvmlDeviceSetAPIRestriction(device, apiType, isRestricted) +} + +// nvml.DeviceSetAccountingMode() +func (l *library) DeviceSetAccountingMode(device Device, mode EnableState) Return { + return device.SetAccountingMode(mode) +} + +func (device nvmlDevice) SetAccountingMode(mode EnableState) Return { + return nvmlDeviceSetAccountingMode(device, mode) +} + +// nvml.DeviceClearAccountingPids() +func (l *library) DeviceClearAccountingPids(device Device) Return { + return device.ClearAccountingPids() +} + +func (device nvmlDevice) ClearAccountingPids() Return { + return nvmlDeviceClearAccountingPids(device) +} + +// nvml.DeviceGetNvLinkState() +func (l *library) DeviceGetNvLinkState(device Device, link int) (EnableState, Return) { + return device.GetNvLinkState(link) +} + +func (device nvmlDevice) GetNvLinkState(link int) (EnableState, Return) { + var isActive EnableState + ret := nvmlDeviceGetNvLinkState(device, uint32(link), &isActive) + return isActive, ret +} + +// nvml.DeviceGetNvLinkVersion() +func (l *library) DeviceGetNvLinkVersion(device Device, link int) (uint32, Return) { + return device.GetNvLinkVersion(link) +} + +func (device nvmlDevice) GetNvLinkVersion(link int) (uint32, Return) { + var version uint32 + ret := nvmlDeviceGetNvLinkVersion(device, uint32(link), &version) + return version, ret +} + +// nvml.DeviceGetNvLinkCapability() +func (l *library) DeviceGetNvLinkCapability(device Device, link int, capability NvLinkCapability) (uint32, Return) { + return device.GetNvLinkCapability(link, capability) +} + +func (device nvmlDevice) GetNvLinkCapability(link int, capability NvLinkCapability) (uint32, Return) { + var capResult uint32 + ret := nvmlDeviceGetNvLinkCapability(device, uint32(link), capability, &capResult) + return capResult, ret +} + +// nvml.DeviceGetNvLinkRemotePciInfo() +func (l *library) DeviceGetNvLinkRemotePciInfo(device Device, link int) (PciInfo, Return) { + return device.GetNvLinkRemotePciInfo(link) +} + +func (device nvmlDevice) GetNvLinkRemotePciInfo(link int) (PciInfo, Return) { + var pci PciInfo + ret := nvmlDeviceGetNvLinkRemotePciInfo(device, uint32(link), &pci) + return pci, ret +} + +// nvml.DeviceGetNvLinkErrorCounter() +func (l *library) DeviceGetNvLinkErrorCounter(device Device, link int, counter NvLinkErrorCounter) (uint64, Return) { + return device.GetNvLinkErrorCounter(link, counter) +} + +func (device nvmlDevice) GetNvLinkErrorCounter(link int, counter NvLinkErrorCounter) (uint64, Return) { + var counterValue uint64 + ret := nvmlDeviceGetNvLinkErrorCounter(device, uint32(link), counter, &counterValue) + return counterValue, ret +} + +// nvml.DeviceResetNvLinkErrorCounters() +func (l *library) DeviceResetNvLinkErrorCounters(device Device, link int) Return { + return device.ResetNvLinkErrorCounters(link) +} + +func (device nvmlDevice) ResetNvLinkErrorCounters(link int) Return { + return nvmlDeviceResetNvLinkErrorCounters(device, uint32(link)) +} + +// nvml.DeviceSetNvLinkUtilizationControl() +func (l *library) DeviceSetNvLinkUtilizationControl(device Device, link int, counter int, control *NvLinkUtilizationControl, reset bool) Return { + return device.SetNvLinkUtilizationControl(link, counter, control, reset) +} + +func (device nvmlDevice) SetNvLinkUtilizationControl(link int, counter int, control *NvLinkUtilizationControl, reset bool) Return { + resetValue := uint32(0) + if reset { + resetValue = 1 + } + return 
nvmlDeviceSetNvLinkUtilizationControl(device, uint32(link), uint32(counter), control, resetValue) +} + +// nvml.DeviceGetNvLinkUtilizationControl() +func (l *library) DeviceGetNvLinkUtilizationControl(device Device, link int, counter int) (NvLinkUtilizationControl, Return) { + return device.GetNvLinkUtilizationControl(link, counter) +} + +func (device nvmlDevice) GetNvLinkUtilizationControl(link int, counter int) (NvLinkUtilizationControl, Return) { + var control NvLinkUtilizationControl + ret := nvmlDeviceGetNvLinkUtilizationControl(device, uint32(link), uint32(counter), &control) + return control, ret +} + +// nvml.DeviceGetNvLinkUtilizationCounter() +func (l *library) DeviceGetNvLinkUtilizationCounter(device Device, link int, counter int) (uint64, uint64, Return) { + return device.GetNvLinkUtilizationCounter(link, counter) +} + +func (device nvmlDevice) GetNvLinkUtilizationCounter(link int, counter int) (uint64, uint64, Return) { + var rxCounter, txCounter uint64 + ret := nvmlDeviceGetNvLinkUtilizationCounter(device, uint32(link), uint32(counter), &rxCounter, &txCounter) + return rxCounter, txCounter, ret +} + +// nvml.DeviceFreezeNvLinkUtilizationCounter() +func (l *library) DeviceFreezeNvLinkUtilizationCounter(device Device, link int, counter int, freeze EnableState) Return { + return device.FreezeNvLinkUtilizationCounter(link, counter, freeze) +} + +func (device nvmlDevice) FreezeNvLinkUtilizationCounter(link int, counter int, freeze EnableState) Return { + return nvmlDeviceFreezeNvLinkUtilizationCounter(device, uint32(link), uint32(counter), freeze) +} + +// nvml.DeviceResetNvLinkUtilizationCounter() +func (l *library) DeviceResetNvLinkUtilizationCounter(device Device, link int, counter int) Return { + return device.ResetNvLinkUtilizationCounter(link, counter) +} + +func (device nvmlDevice) ResetNvLinkUtilizationCounter(link int, counter int) Return { + return nvmlDeviceResetNvLinkUtilizationCounter(device, uint32(link), uint32(counter)) +} + +// nvml.DeviceGetNvLinkRemoteDeviceType() +func (l *library) DeviceGetNvLinkRemoteDeviceType(device Device, link int) (IntNvLinkDeviceType, Return) { + return device.GetNvLinkRemoteDeviceType(link) +} + +func (device nvmlDevice) GetNvLinkRemoteDeviceType(link int) (IntNvLinkDeviceType, Return) { + var nvLinkDeviceType IntNvLinkDeviceType + ret := nvmlDeviceGetNvLinkRemoteDeviceType(device, uint32(link), &nvLinkDeviceType) + return nvLinkDeviceType, ret +} + +// nvml.DeviceRegisterEvents() +func (l *library) DeviceRegisterEvents(device Device, eventTypes uint64, set EventSet) Return { + return device.RegisterEvents(eventTypes, set) +} + +func (device nvmlDevice) RegisterEvents(eventTypes uint64, set EventSet) Return { + return nvmlDeviceRegisterEvents(device, eventTypes, set.(nvmlEventSet)) +} + +// nvmlDeviceGetSupportedEventTypes() +func (l *library) DeviceGetSupportedEventTypes(device Device) (uint64, Return) { + return device.GetSupportedEventTypes() +} + +func (device nvmlDevice) GetSupportedEventTypes() (uint64, Return) { + var eventTypes uint64 + ret := nvmlDeviceGetSupportedEventTypes(device, &eventTypes) + return eventTypes, ret +} + +// nvml.DeviceModifyDrainState() +func (l *library) DeviceModifyDrainState(pciInfo *PciInfo, newState EnableState) Return { + return nvmlDeviceModifyDrainState(pciInfo, newState) +} + +// nvml.DeviceQueryDrainState() +func (l *library) DeviceQueryDrainState(pciInfo *PciInfo) (EnableState, Return) { + var currentState EnableState + ret := nvmlDeviceQueryDrainState(pciInfo, &currentState) + return currentState, 
ret +} + +// nvml.DeviceRemoveGpu() +func (l *library) DeviceRemoveGpu(pciInfo *PciInfo) Return { + return nvmlDeviceRemoveGpu(pciInfo) +} + +// nvml.DeviceRemoveGpu_v2() +func (l *library) DeviceRemoveGpu_v2(pciInfo *PciInfo, gpuState DetachGpuState, linkState PcieLinkState) Return { + return nvmlDeviceRemoveGpu_v2(pciInfo, gpuState, linkState) +} + +// nvml.DeviceDiscoverGpus() +func (l *library) DeviceDiscoverGpus() (PciInfo, Return) { + var pciInfo PciInfo + ret := nvmlDeviceDiscoverGpus(&pciInfo) + return pciInfo, ret +} + +// nvml.DeviceGetFieldValues() +func (l *library) DeviceGetFieldValues(device Device, values []FieldValue) Return { + return device.GetFieldValues(values) +} + +func (device nvmlDevice) GetFieldValues(values []FieldValue) Return { + valuesCount := len(values) + return nvmlDeviceGetFieldValues(device, int32(valuesCount), &values[0]) +} + +// nvml.DeviceGetVirtualizationMode() +func (l *library) DeviceGetVirtualizationMode(device Device) (GpuVirtualizationMode, Return) { + return device.GetVirtualizationMode() +} + +func (device nvmlDevice) GetVirtualizationMode() (GpuVirtualizationMode, Return) { + var pVirtualMode GpuVirtualizationMode + ret := nvmlDeviceGetVirtualizationMode(device, &pVirtualMode) + return pVirtualMode, ret +} + +// nvml.DeviceGetHostVgpuMode() +func (l *library) DeviceGetHostVgpuMode(device Device) (HostVgpuMode, Return) { + return device.GetHostVgpuMode() +} + +func (device nvmlDevice) GetHostVgpuMode() (HostVgpuMode, Return) { + var pHostVgpuMode HostVgpuMode + ret := nvmlDeviceGetHostVgpuMode(device, &pHostVgpuMode) + return pHostVgpuMode, ret +} + +// nvml.DeviceSetVirtualizationMode() +func (l *library) DeviceSetVirtualizationMode(device Device, virtualMode GpuVirtualizationMode) Return { + return device.SetVirtualizationMode(virtualMode) +} + +func (device nvmlDevice) SetVirtualizationMode(virtualMode GpuVirtualizationMode) Return { + return nvmlDeviceSetVirtualizationMode(device, virtualMode) +} + +// nvml.DeviceGetGridLicensableFeatures() +func (l *library) DeviceGetGridLicensableFeatures(device Device) (GridLicensableFeatures, Return) { + return device.GetGridLicensableFeatures() +} + +func (device nvmlDevice) GetGridLicensableFeatures() (GridLicensableFeatures, Return) { + var pGridLicensableFeatures GridLicensableFeatures + ret := nvmlDeviceGetGridLicensableFeatures(device, &pGridLicensableFeatures) + return pGridLicensableFeatures, ret +} + +// nvml.DeviceGetProcessUtilization() +func (l *library) DeviceGetProcessUtilization(device Device, lastSeenTimestamp uint64) ([]ProcessUtilizationSample, Return) { + return device.GetProcessUtilization(lastSeenTimestamp) +} + +func (device nvmlDevice) GetProcessUtilization(lastSeenTimestamp uint64) ([]ProcessUtilizationSample, Return) { + var processSamplesCount uint32 + ret := nvmlDeviceGetProcessUtilization(device, nil, &processSamplesCount, lastSeenTimestamp) + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + if processSamplesCount == 0 { + return []ProcessUtilizationSample{}, ret + } + utilization := make([]ProcessUtilizationSample, processSamplesCount) + ret = nvmlDeviceGetProcessUtilization(device, &utilization[0], &processSamplesCount, lastSeenTimestamp) + return utilization[:processSamplesCount], ret +} + +// nvml.DeviceGetSupportedVgpus() +func (l *library) DeviceGetSupportedVgpus(device Device) ([]VgpuTypeId, Return) { + return device.GetSupportedVgpus() +} + +func (device nvmlDevice) GetSupportedVgpus() ([]VgpuTypeId, Return) { + var vgpuCount uint32 = 1 // Will be reduced 
upon returning + for { + vgpuTypeIds := make([]nvmlVgpuTypeId, vgpuCount) + ret := nvmlDeviceGetSupportedVgpus(device, &vgpuCount, &vgpuTypeIds[0]) + if ret == SUCCESS { + return convertSlice[nvmlVgpuTypeId, VgpuTypeId](vgpuTypeIds[:vgpuCount]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + vgpuCount *= 2 + } +} + +// nvml.DeviceGetCreatableVgpus() +func (l *library) DeviceGetCreatableVgpus(device Device) ([]VgpuTypeId, Return) { + return device.GetCreatableVgpus() +} + +func (device nvmlDevice) GetCreatableVgpus() ([]VgpuTypeId, Return) { + var vgpuCount uint32 = 1 // Will be reduced upon returning + for { + vgpuTypeIds := make([]nvmlVgpuTypeId, vgpuCount) + ret := nvmlDeviceGetCreatableVgpus(device, &vgpuCount, &vgpuTypeIds[0]) + if ret == SUCCESS { + return convertSlice[nvmlVgpuTypeId, VgpuTypeId](vgpuTypeIds[:vgpuCount]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + vgpuCount *= 2 + } +} + +// nvml.DeviceGetActiveVgpus() +func (l *library) DeviceGetActiveVgpus(device Device) ([]VgpuInstance, Return) { + return device.GetActiveVgpus() +} + +func (device nvmlDevice) GetActiveVgpus() ([]VgpuInstance, Return) { + var vgpuCount uint32 = 1 // Will be reduced upon returning + for { + vgpuInstances := make([]nvmlVgpuInstance, vgpuCount) + ret := nvmlDeviceGetActiveVgpus(device, &vgpuCount, &vgpuInstances[0]) + if ret == SUCCESS { + return convertSlice[nvmlVgpuInstance, VgpuInstance](vgpuInstances[:vgpuCount]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + vgpuCount *= 2 + } +} + +// nvml.DeviceGetVgpuMetadata() +func (l *library) DeviceGetVgpuMetadata(device Device) (VgpuPgpuMetadata, Return) { + return device.GetVgpuMetadata() +} + +func (device nvmlDevice) GetVgpuMetadata() (VgpuPgpuMetadata, Return) { + var vgpuPgpuMetadata VgpuPgpuMetadata + opaqueDataSize := unsafe.Sizeof(vgpuPgpuMetadata.nvmlVgpuPgpuMetadata.OpaqueData) + vgpuPgpuMetadataSize := unsafe.Sizeof(vgpuPgpuMetadata.nvmlVgpuPgpuMetadata) - opaqueDataSize + for { + bufferSize := uint32(vgpuPgpuMetadataSize + opaqueDataSize) + buffer := make([]byte, bufferSize) + nvmlVgpuPgpuMetadataPtr := (*nvmlVgpuPgpuMetadata)(unsafe.Pointer(&buffer[0])) + ret := nvmlDeviceGetVgpuMetadata(device, nvmlVgpuPgpuMetadataPtr, &bufferSize) + if ret == SUCCESS { + vgpuPgpuMetadata.nvmlVgpuPgpuMetadata = *nvmlVgpuPgpuMetadataPtr + vgpuPgpuMetadata.OpaqueData = buffer[vgpuPgpuMetadataSize:bufferSize] + return vgpuPgpuMetadata, ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return vgpuPgpuMetadata, ret + } + opaqueDataSize = 2 * opaqueDataSize + } +} + +// nvml.DeviceGetPgpuMetadataString() +func (l *library) DeviceGetPgpuMetadataString(device Device) (string, Return) { + return device.GetPgpuMetadataString() +} + +func (device nvmlDevice) GetPgpuMetadataString() (string, Return) { + var bufferSize uint32 = 1 // Will be reduced upon returning + for { + pgpuMetadata := make([]byte, bufferSize) + ret := nvmlDeviceGetPgpuMetadataString(device, &pgpuMetadata[0], &bufferSize) + if ret == SUCCESS { + return string(pgpuMetadata[:clen(pgpuMetadata)]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return "", ret + } + bufferSize *= 2 + } +} + +// nvml.DeviceGetVgpuUtilization() +func (l *library) DeviceGetVgpuUtilization(device Device, lastSeenTimestamp uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) { + return device.GetVgpuUtilization(lastSeenTimestamp) +} + +func (device nvmlDevice) GetVgpuUtilization(lastSeenTimestamp uint64) (ValueType, []VgpuInstanceUtilizationSample, 
Return) { + var sampleValType ValueType + var vgpuInstanceSamplesCount uint32 = 1 // Will be reduced upon returning + for { + utilizationSamples := make([]VgpuInstanceUtilizationSample, vgpuInstanceSamplesCount) + ret := nvmlDeviceGetVgpuUtilization(device, lastSeenTimestamp, &sampleValType, &vgpuInstanceSamplesCount, &utilizationSamples[0]) + if ret == SUCCESS { + return sampleValType, utilizationSamples[:vgpuInstanceSamplesCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return sampleValType, nil, ret + } + vgpuInstanceSamplesCount *= 2 + } +} + +// nvml.DeviceGetAttributes() +func (l *library) DeviceGetAttributes(device Device) (DeviceAttributes, Return) { + return device.GetAttributes() +} + +func (device nvmlDevice) GetAttributes() (DeviceAttributes, Return) { + var attributes DeviceAttributes + ret := nvmlDeviceGetAttributes(device, &attributes) + return attributes, ret +} + +// nvml.DeviceGetRemappedRows() +func (l *library) DeviceGetRemappedRows(device Device) (int, int, bool, bool, Return) { + return device.GetRemappedRows() +} + +func (device nvmlDevice) GetRemappedRows() (int, int, bool, bool, Return) { + var corrRows, uncRows, isPending, failureOccured uint32 + ret := nvmlDeviceGetRemappedRows(device, &corrRows, &uncRows, &isPending, &failureOccured) + return int(corrRows), int(uncRows), (isPending != 0), (failureOccured != 0), ret +} + +// nvml.DeviceGetRowRemapperHistogram() +func (l *library) DeviceGetRowRemapperHistogram(device Device) (RowRemapperHistogramValues, Return) { + return device.GetRowRemapperHistogram() +} + +func (device nvmlDevice) GetRowRemapperHistogram() (RowRemapperHistogramValues, Return) { + var values RowRemapperHistogramValues + ret := nvmlDeviceGetRowRemapperHistogram(device, &values) + return values, ret +} + +// nvml.DeviceGetArchitecture() +func (l *library) DeviceGetArchitecture(device Device) (DeviceArchitecture, Return) { + return device.GetArchitecture() +} + +func (device nvmlDevice) GetArchitecture() (DeviceArchitecture, Return) { + var arch DeviceArchitecture + ret := nvmlDeviceGetArchitecture(device, &arch) + return arch, ret +} + +// nvml.DeviceGetVgpuProcessUtilization() +func (l *library) DeviceGetVgpuProcessUtilization(device Device, lastSeenTimestamp uint64) ([]VgpuProcessUtilizationSample, Return) { + return device.GetVgpuProcessUtilization(lastSeenTimestamp) +} + +func (device nvmlDevice) GetVgpuProcessUtilization(lastSeenTimestamp uint64) ([]VgpuProcessUtilizationSample, Return) { + var vgpuProcessSamplesCount uint32 = 1 // Will be reduced upon returning + for { + utilizationSamples := make([]VgpuProcessUtilizationSample, vgpuProcessSamplesCount) + ret := nvmlDeviceGetVgpuProcessUtilization(device, lastSeenTimestamp, &vgpuProcessSamplesCount, &utilizationSamples[0]) + if ret == SUCCESS { + return utilizationSamples[:vgpuProcessSamplesCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + vgpuProcessSamplesCount *= 2 + } +} + +// nvml.GetExcludedDeviceCount() +func (l *library) GetExcludedDeviceCount() (int, Return) { + var deviceCount uint32 + ret := nvmlGetExcludedDeviceCount(&deviceCount) + return int(deviceCount), ret +} + +// nvml.GetExcludedDeviceInfoByIndex() +func (l *library) GetExcludedDeviceInfoByIndex(index int) (ExcludedDeviceInfo, Return) { + var info ExcludedDeviceInfo + ret := nvmlGetExcludedDeviceInfoByIndex(uint32(index), &info) + return info, ret +} + +// nvml.DeviceSetMigMode() +func (l *library) DeviceSetMigMode(device Device, mode int) (Return, Return) { + return 
device.SetMigMode(mode) +} + +func (device nvmlDevice) SetMigMode(mode int) (Return, Return) { + var activationStatus Return + ret := nvmlDeviceSetMigMode(device, uint32(mode), &activationStatus) + return activationStatus, ret +} + +// nvml.DeviceGetMigMode() +func (l *library) DeviceGetMigMode(device Device) (int, int, Return) { + return device.GetMigMode() +} + +func (device nvmlDevice) GetMigMode() (int, int, Return) { + var currentMode, pendingMode uint32 + ret := nvmlDeviceGetMigMode(device, &currentMode, &pendingMode) + return int(currentMode), int(pendingMode), ret +} + +// nvml.DeviceGetGpuInstanceProfileInfo() +func (l *library) DeviceGetGpuInstanceProfileInfo(device Device, profile int) (GpuInstanceProfileInfo, Return) { + return device.GetGpuInstanceProfileInfo(profile) +} + +func (device nvmlDevice) GetGpuInstanceProfileInfo(profile int) (GpuInstanceProfileInfo, Return) { + var info GpuInstanceProfileInfo + ret := nvmlDeviceGetGpuInstanceProfileInfo(device, uint32(profile), &info) + return info, ret +} + +// nvml.DeviceGetGpuInstanceProfileInfoV() +type GpuInstanceProfileInfoHandler struct { + device nvmlDevice + profile int +} + +func (handler GpuInstanceProfileInfoHandler) V1() (GpuInstanceProfileInfo, Return) { + return DeviceGetGpuInstanceProfileInfo(handler.device, handler.profile) +} + +func (handler GpuInstanceProfileInfoHandler) V2() (GpuInstanceProfileInfo_v2, Return) { + var info GpuInstanceProfileInfo_v2 + info.Version = STRUCT_VERSION(info, 2) + ret := nvmlDeviceGetGpuInstanceProfileInfoV(handler.device, uint32(handler.profile), &info) + return info, ret +} + +func (l *library) DeviceGetGpuInstanceProfileInfoV(device Device, profile int) GpuInstanceProfileInfoHandler { + return device.GetGpuInstanceProfileInfoV(profile) +} + +func (device nvmlDevice) GetGpuInstanceProfileInfoV(profile int) GpuInstanceProfileInfoHandler { + return GpuInstanceProfileInfoHandler{device, profile} +} + +// nvml.DeviceGetGpuInstancePossiblePlacements() +func (l *library) DeviceGetGpuInstancePossiblePlacements(device Device, info *GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) { + return device.GetGpuInstancePossiblePlacements(info) +} + +func (device nvmlDevice) GetGpuInstancePossiblePlacements(info *GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) { + if info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var count uint32 + ret := nvmlDeviceGetGpuInstancePossiblePlacements(device, info.Id, nil, &count) + if ret != SUCCESS { + return nil, ret + } + if count == 0 { + return []GpuInstancePlacement{}, ret + } + placements := make([]GpuInstancePlacement, count) + ret = nvmlDeviceGetGpuInstancePossiblePlacements(device, info.Id, &placements[0], &count) + return placements[:count], ret +} + +// nvml.DeviceGetGpuInstanceRemainingCapacity() +func (l *library) DeviceGetGpuInstanceRemainingCapacity(device Device, info *GpuInstanceProfileInfo) (int, Return) { + return device.GetGpuInstanceRemainingCapacity(info) +} + +func (device nvmlDevice) GetGpuInstanceRemainingCapacity(info *GpuInstanceProfileInfo) (int, Return) { + if info == nil { + return 0, ERROR_INVALID_ARGUMENT + } + var count uint32 + ret := nvmlDeviceGetGpuInstanceRemainingCapacity(device, info.Id, &count) + return int(count), ret +} + +// nvml.DeviceCreateGpuInstance() +func (l *library) DeviceCreateGpuInstance(device Device, info *GpuInstanceProfileInfo) (GpuInstance, Return) { + return device.CreateGpuInstance(info) +} + +func (device nvmlDevice) CreateGpuInstance(info *GpuInstanceProfileInfo) (GpuInstance, Return) { 
+ if info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var gpuInstance nvmlGpuInstance + ret := nvmlDeviceCreateGpuInstance(device, info.Id, &gpuInstance) + return gpuInstance, ret +} + +// nvml.DeviceCreateGpuInstanceWithPlacement() +func (l *library) DeviceCreateGpuInstanceWithPlacement(device Device, info *GpuInstanceProfileInfo, placement *GpuInstancePlacement) (GpuInstance, Return) { + return device.CreateGpuInstanceWithPlacement(info, placement) +} + +func (device nvmlDevice) CreateGpuInstanceWithPlacement(info *GpuInstanceProfileInfo, placement *GpuInstancePlacement) (GpuInstance, Return) { + if info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var gpuInstance nvmlGpuInstance + ret := nvmlDeviceCreateGpuInstanceWithPlacement(device, info.Id, placement, &gpuInstance) + return gpuInstance, ret +} + +// nvml.GpuInstanceDestroy() +func (l *library) GpuInstanceDestroy(gpuInstance GpuInstance) Return { + return gpuInstance.Destroy() +} + +func (gpuInstance nvmlGpuInstance) Destroy() Return { + return nvmlGpuInstanceDestroy(gpuInstance) +} + +// nvml.DeviceGetGpuInstances() +func (l *library) DeviceGetGpuInstances(device Device, info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { + return device.GetGpuInstances(info) +} + +func (device nvmlDevice) GetGpuInstances(info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { + if info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var count uint32 = info.InstanceCount + gpuInstances := make([]nvmlGpuInstance, count) + ret := nvmlDeviceGetGpuInstances(device, info.Id, &gpuInstances[0], &count) + return convertSlice[nvmlGpuInstance, GpuInstance](gpuInstances[:count]), ret +} + +// nvml.DeviceGetGpuInstanceById() +func (l *library) DeviceGetGpuInstanceById(device Device, id int) (GpuInstance, Return) { + return device.GetGpuInstanceById(id) +} + +func (device nvmlDevice) GetGpuInstanceById(id int) (GpuInstance, Return) { + var gpuInstance nvmlGpuInstance + ret := nvmlDeviceGetGpuInstanceById(device, uint32(id), &gpuInstance) + return gpuInstance, ret +} + +// nvml.GpuInstanceGetInfo() +func (l *library) GpuInstanceGetInfo(gpuInstance GpuInstance) (GpuInstanceInfo, Return) { + return gpuInstance.GetInfo() +} + +func (gpuInstance nvmlGpuInstance) GetInfo() (GpuInstanceInfo, Return) { + var info nvmlGpuInstanceInfo + ret := nvmlGpuInstanceGetInfo(gpuInstance, &info) + return info.convert(), ret +} + +// nvml.GpuInstanceGetComputeInstanceProfileInfo() +func (l *library) GpuInstanceGetComputeInstanceProfileInfo(gpuInstance GpuInstance, profile int, engProfile int) (ComputeInstanceProfileInfo, Return) { + return gpuInstance.GetComputeInstanceProfileInfo(profile, engProfile) +} + +func (gpuInstance nvmlGpuInstance) GetComputeInstanceProfileInfo(profile int, engProfile int) (ComputeInstanceProfileInfo, Return) { + var info ComputeInstanceProfileInfo + ret := nvmlGpuInstanceGetComputeInstanceProfileInfo(gpuInstance, uint32(profile), uint32(engProfile), &info) + return info, ret +} + +// nvml.GpuInstanceGetComputeInstanceProfileInfoV() +type ComputeInstanceProfileInfoHandler struct { + gpuInstance nvmlGpuInstance + profile int + engProfile int +} + +func (handler ComputeInstanceProfileInfoHandler) V1() (ComputeInstanceProfileInfo, Return) { + return GpuInstanceGetComputeInstanceProfileInfo(handler.gpuInstance, handler.profile, handler.engProfile) +} + +func (handler ComputeInstanceProfileInfoHandler) V2() (ComputeInstanceProfileInfo_v2, Return) { + var info ComputeInstanceProfileInfo_v2 + info.Version = STRUCT_VERSION(info, 2) + ret := 
nvmlGpuInstanceGetComputeInstanceProfileInfoV(handler.gpuInstance, uint32(handler.profile), uint32(handler.engProfile), &info) + return info, ret +} + +func (l *library) GpuInstanceGetComputeInstanceProfileInfoV(gpuInstance GpuInstance, profile int, engProfile int) ComputeInstanceProfileInfoHandler { + return gpuInstance.GetComputeInstanceProfileInfoV(profile, engProfile) +} + +func (gpuInstance nvmlGpuInstance) GetComputeInstanceProfileInfoV(profile int, engProfile int) ComputeInstanceProfileInfoHandler { + return ComputeInstanceProfileInfoHandler{gpuInstance, profile, engProfile} +} + +// nvml.GpuInstanceGetComputeInstanceRemainingCapacity() +func (l *library) GpuInstanceGetComputeInstanceRemainingCapacity(gpuInstance GpuInstance, info *ComputeInstanceProfileInfo) (int, Return) { + return gpuInstance.GetComputeInstanceRemainingCapacity(info) +} + +func (gpuInstance nvmlGpuInstance) GetComputeInstanceRemainingCapacity(info *ComputeInstanceProfileInfo) (int, Return) { + if info == nil { + return 0, ERROR_INVALID_ARGUMENT + } + var count uint32 + ret := nvmlGpuInstanceGetComputeInstanceRemainingCapacity(gpuInstance, info.Id, &count) + return int(count), ret +} + +// nvml.GpuInstanceCreateComputeInstance() +func (l *library) GpuInstanceCreateComputeInstance(gpuInstance GpuInstance, info *ComputeInstanceProfileInfo) (ComputeInstance, Return) { + return gpuInstance.CreateComputeInstance(info) +} + +func (gpuInstance nvmlGpuInstance) CreateComputeInstance(info *ComputeInstanceProfileInfo) (ComputeInstance, Return) { + if info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var computeInstance nvmlComputeInstance + ret := nvmlGpuInstanceCreateComputeInstance(gpuInstance, info.Id, &computeInstance) + return computeInstance, ret +} + +// nvml.ComputeInstanceDestroy() +func (l *library) ComputeInstanceDestroy(computeInstance ComputeInstance) Return { + return computeInstance.Destroy() +} + +func (computeInstance nvmlComputeInstance) Destroy() Return { + return nvmlComputeInstanceDestroy(computeInstance) +} + +// nvml.GpuInstanceGetComputeInstances() +func (l *library) GpuInstanceGetComputeInstances(gpuInstance GpuInstance, info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) { + return gpuInstance.GetComputeInstances(info) +} + +func (gpuInstance nvmlGpuInstance) GetComputeInstances(info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) { + if info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var count uint32 = info.InstanceCount + computeInstances := make([]nvmlComputeInstance, count) + ret := nvmlGpuInstanceGetComputeInstances(gpuInstance, info.Id, &computeInstances[0], &count) + return convertSlice[nvmlComputeInstance, ComputeInstance](computeInstances[:count]), ret +} + +// nvml.GpuInstanceGetComputeInstanceById() +func (l *library) GpuInstanceGetComputeInstanceById(gpuInstance GpuInstance, id int) (ComputeInstance, Return) { + return gpuInstance.GetComputeInstanceById(id) +} + +func (gpuInstance nvmlGpuInstance) GetComputeInstanceById(id int) (ComputeInstance, Return) { + var computeInstance nvmlComputeInstance + ret := nvmlGpuInstanceGetComputeInstanceById(gpuInstance, uint32(id), &computeInstance) + return computeInstance, ret +} + +// nvml.ComputeInstanceGetInfo() +func (l *library) ComputeInstanceGetInfo(computeInstance ComputeInstance) (ComputeInstanceInfo, Return) { + return computeInstance.GetInfo() +} + +func (computeInstance nvmlComputeInstance) GetInfo() (ComputeInstanceInfo, Return) { + var info nvmlComputeInstanceInfo + ret := 
nvmlComputeInstanceGetInfo(computeInstance, &info) + return info.convert(), ret +} + +// nvml.DeviceIsMigDeviceHandle() +func (l *library) DeviceIsMigDeviceHandle(device Device) (bool, Return) { + return device.IsMigDeviceHandle() +} + +func (device nvmlDevice) IsMigDeviceHandle() (bool, Return) { + var isMigDevice uint32 + ret := nvmlDeviceIsMigDeviceHandle(device, &isMigDevice) + return (isMigDevice != 0), ret +} + +// nvml DeviceGetGpuInstanceId() +func (l *library) DeviceGetGpuInstanceId(device Device) (int, Return) { + return device.GetGpuInstanceId() +} + +func (device nvmlDevice) GetGpuInstanceId() (int, Return) { + var id uint32 + ret := nvmlDeviceGetGpuInstanceId(device, &id) + return int(id), ret +} + +// nvml.DeviceGetComputeInstanceId() +func (l *library) DeviceGetComputeInstanceId(device Device) (int, Return) { + return device.GetComputeInstanceId() +} + +func (device nvmlDevice) GetComputeInstanceId() (int, Return) { + var id uint32 + ret := nvmlDeviceGetComputeInstanceId(device, &id) + return int(id), ret +} + +// nvml.DeviceGetMaxMigDeviceCount() +func (l *library) DeviceGetMaxMigDeviceCount(device Device) (int, Return) { + return device.GetMaxMigDeviceCount() +} + +func (device nvmlDevice) GetMaxMigDeviceCount() (int, Return) { + var count uint32 + ret := nvmlDeviceGetMaxMigDeviceCount(device, &count) + return int(count), ret +} + +// nvml.DeviceGetMigDeviceHandleByIndex() +func (l *library) DeviceGetMigDeviceHandleByIndex(device Device, index int) (Device, Return) { + return device.GetMigDeviceHandleByIndex(index) +} + +func (device nvmlDevice) GetMigDeviceHandleByIndex(index int) (Device, Return) { + var migDevice nvmlDevice + ret := nvmlDeviceGetMigDeviceHandleByIndex(device, uint32(index), &migDevice) + return migDevice, ret +} + +// nvml.DeviceGetDeviceHandleFromMigDeviceHandle() +func (l *library) DeviceGetDeviceHandleFromMigDeviceHandle(migdevice Device) (Device, Return) { + return migdevice.GetDeviceHandleFromMigDeviceHandle() +} + +func (migDevice nvmlDevice) GetDeviceHandleFromMigDeviceHandle() (Device, Return) { + var device nvmlDevice + ret := nvmlDeviceGetDeviceHandleFromMigDeviceHandle(migDevice, &device) + return device, ret +} + +// nvml.DeviceGetBusType() +func (l *library) DeviceGetBusType(device Device) (BusType, Return) { + return device.GetBusType() +} + +func (device nvmlDevice) GetBusType() (BusType, Return) { + var busType BusType + ret := nvmlDeviceGetBusType(device, &busType) + return busType, ret +} + +// nvml.DeviceSetDefaultFanSpeed_v2() +func (l *library) DeviceSetDefaultFanSpeed_v2(device Device, fan int) Return { + return device.SetDefaultFanSpeed_v2(fan) +} + +func (device nvmlDevice) SetDefaultFanSpeed_v2(fan int) Return { + return nvmlDeviceSetDefaultFanSpeed_v2(device, uint32(fan)) +} + +// nvml.DeviceGetMinMaxFanSpeed() +func (l *library) DeviceGetMinMaxFanSpeed(device Device) (int, int, Return) { + return device.GetMinMaxFanSpeed() +} + +func (device nvmlDevice) GetMinMaxFanSpeed() (int, int, Return) { + var minSpeed, maxSpeed uint32 + ret := nvmlDeviceGetMinMaxFanSpeed(device, &minSpeed, &maxSpeed) + return int(minSpeed), int(maxSpeed), ret +} + +// nvml.DeviceGetThermalSettings() +func (l *library) DeviceGetThermalSettings(device Device, sensorIndex uint32) (GpuThermalSettings, Return) { + return device.GetThermalSettings(sensorIndex) +} + +func (device nvmlDevice) GetThermalSettings(sensorIndex uint32) (GpuThermalSettings, Return) { + var pThermalSettings GpuThermalSettings + ret := nvmlDeviceGetThermalSettings(device, sensorIndex, 
&pThermalSettings) + return pThermalSettings, ret +} + +// nvml.DeviceGetDefaultEccMode() +func (l *library) DeviceGetDefaultEccMode(device Device) (EnableState, Return) { + return device.GetDefaultEccMode() +} + +func (device nvmlDevice) GetDefaultEccMode() (EnableState, Return) { + var defaultMode EnableState + ret := nvmlDeviceGetDefaultEccMode(device, &defaultMode) + return defaultMode, ret +} + +// nvml.DeviceGetPcieSpeed() +func (l *library) DeviceGetPcieSpeed(device Device) (int, Return) { + return device.GetPcieSpeed() +} + +func (device nvmlDevice) GetPcieSpeed() (int, Return) { + var pcieSpeed uint32 + ret := nvmlDeviceGetPcieSpeed(device, &pcieSpeed) + return int(pcieSpeed), ret +} + +// nvml.DeviceGetGspFirmwareVersion() +func (l *library) DeviceGetGspFirmwareVersion(device Device) (string, Return) { + return device.GetGspFirmwareVersion() +} + +func (device nvmlDevice) GetGspFirmwareVersion() (string, Return) { + version := make([]byte, GSP_FIRMWARE_VERSION_BUF_SIZE) + ret := nvmlDeviceGetGspFirmwareVersion(device, &version[0]) + return string(version[:clen(version)]), ret +} + +// nvml.DeviceGetGspFirmwareMode() +func (l *library) DeviceGetGspFirmwareMode(device Device) (bool, bool, Return) { + return device.GetGspFirmwareMode() +} + +func (device nvmlDevice) GetGspFirmwareMode() (bool, bool, Return) { + var isEnabled, defaultMode uint32 + ret := nvmlDeviceGetGspFirmwareMode(device, &isEnabled, &defaultMode) + return (isEnabled != 0), (defaultMode != 0), ret +} + +// nvml.DeviceGetDynamicPstatesInfo() +func (l *library) DeviceGetDynamicPstatesInfo(device Device) (GpuDynamicPstatesInfo, Return) { + return device.GetDynamicPstatesInfo() +} + +func (device nvmlDevice) GetDynamicPstatesInfo() (GpuDynamicPstatesInfo, Return) { + var pDynamicPstatesInfo GpuDynamicPstatesInfo + ret := nvmlDeviceGetDynamicPstatesInfo(device, &pDynamicPstatesInfo) + return pDynamicPstatesInfo, ret +} + +// nvml.DeviceSetFanSpeed_v2() +func (l *library) DeviceSetFanSpeed_v2(device Device, fan int, speed int) Return { + return device.SetFanSpeed_v2(fan, speed) +} + +func (device nvmlDevice) SetFanSpeed_v2(fan int, speed int) Return { + return nvmlDeviceSetFanSpeed_v2(device, uint32(fan), uint32(speed)) +} + +// nvml.DeviceGetGpcClkVfOffset() +func (l *library) DeviceGetGpcClkVfOffset(device Device) (int, Return) { + return device.GetGpcClkVfOffset() +} + +func (device nvmlDevice) GetGpcClkVfOffset() (int, Return) { + var offset int32 + ret := nvmlDeviceGetGpcClkVfOffset(device, &offset) + return int(offset), ret +} + +// nvml.DeviceSetGpcClkVfOffset() +func (l *library) DeviceSetGpcClkVfOffset(device Device, offset int) Return { + return device.SetGpcClkVfOffset(offset) +} + +func (device nvmlDevice) SetGpcClkVfOffset(offset int) Return { + return nvmlDeviceSetGpcClkVfOffset(device, int32(offset)) +} + +// nvml.DeviceGetMinMaxClockOfPState() +func (l *library) DeviceGetMinMaxClockOfPState(device Device, clockType ClockType, pstate Pstates) (uint32, uint32, Return) { + return device.GetMinMaxClockOfPState(clockType, pstate) +} + +func (device nvmlDevice) GetMinMaxClockOfPState(clockType ClockType, pstate Pstates) (uint32, uint32, Return) { + var minClockMHz, maxClockMHz uint32 + ret := nvmlDeviceGetMinMaxClockOfPState(device, clockType, pstate, &minClockMHz, &maxClockMHz) + return minClockMHz, maxClockMHz, ret +} + +// nvml.DeviceGetSupportedPerformanceStates() +func (l *library) DeviceGetSupportedPerformanceStates(device Device) ([]Pstates, Return) { + return device.GetSupportedPerformanceStates() +} + 
+func (device nvmlDevice) GetSupportedPerformanceStates() ([]Pstates, Return) { + pstates := make([]Pstates, MAX_GPU_PERF_PSTATES) + ret := nvmlDeviceGetSupportedPerformanceStates(device, &pstates[0], MAX_GPU_PERF_PSTATES) + for i := 0; i < MAX_GPU_PERF_PSTATES; i++ { + if pstates[i] == PSTATE_UNKNOWN { + return pstates[0:i], ret + } + } + return pstates, ret +} + +// nvml.DeviceGetTargetFanSpeed() +func (l *library) DeviceGetTargetFanSpeed(device Device, fan int) (int, Return) { + return device.GetTargetFanSpeed(fan) +} + +func (device nvmlDevice) GetTargetFanSpeed(fan int) (int, Return) { + var targetSpeed uint32 + ret := nvmlDeviceGetTargetFanSpeed(device, uint32(fan), &targetSpeed) + return int(targetSpeed), ret +} + +// nvml.DeviceGetMemClkVfOffset() +func (l *library) DeviceGetMemClkVfOffset(device Device) (int, Return) { + return device.GetMemClkVfOffset() +} + +func (device nvmlDevice) GetMemClkVfOffset() (int, Return) { + var offset int32 + ret := nvmlDeviceGetMemClkVfOffset(device, &offset) + return int(offset), ret +} + +// nvml.DeviceSetMemClkVfOffset() +func (l *library) DeviceSetMemClkVfOffset(device Device, offset int) Return { + return device.SetMemClkVfOffset(offset) +} + +func (device nvmlDevice) SetMemClkVfOffset(offset int) Return { + return nvmlDeviceSetMemClkVfOffset(device, int32(offset)) +} + +// nvml.DeviceGetGpcClkMinMaxVfOffset() +func (l *library) DeviceGetGpcClkMinMaxVfOffset(device Device) (int, int, Return) { + return device.GetGpcClkMinMaxVfOffset() +} + +func (device nvmlDevice) GetGpcClkMinMaxVfOffset() (int, int, Return) { + var minOffset, maxOffset int32 + ret := nvmlDeviceGetGpcClkMinMaxVfOffset(device, &minOffset, &maxOffset) + return int(minOffset), int(maxOffset), ret +} + +// nvml.DeviceGetMemClkMinMaxVfOffset() +func (l *library) DeviceGetMemClkMinMaxVfOffset(device Device) (int, int, Return) { + return device.GetMemClkMinMaxVfOffset() +} + +func (device nvmlDevice) GetMemClkMinMaxVfOffset() (int, int, Return) { + var minOffset, maxOffset int32 + ret := nvmlDeviceGetMemClkMinMaxVfOffset(device, &minOffset, &maxOffset) + return int(minOffset), int(maxOffset), ret +} + +// nvml.DeviceGetGpuMaxPcieLinkGeneration() +func (l *library) DeviceGetGpuMaxPcieLinkGeneration(device Device) (int, Return) { + return device.GetGpuMaxPcieLinkGeneration() +} + +func (device nvmlDevice) GetGpuMaxPcieLinkGeneration() (int, Return) { + var maxLinkGenDevice uint32 + ret := nvmlDeviceGetGpuMaxPcieLinkGeneration(device, &maxLinkGenDevice) + return int(maxLinkGenDevice), ret +} + +// nvml.DeviceGetFanControlPolicy_v2() +func (l *library) DeviceGetFanControlPolicy_v2(device Device, fan int) (FanControlPolicy, Return) { + return device.GetFanControlPolicy_v2(fan) +} + +func (device nvmlDevice) GetFanControlPolicy_v2(fan int) (FanControlPolicy, Return) { + var policy FanControlPolicy + ret := nvmlDeviceGetFanControlPolicy_v2(device, uint32(fan), &policy) + return policy, ret +} + +// nvml.DeviceSetFanControlPolicy() +func (l *library) DeviceSetFanControlPolicy(device Device, fan int, policy FanControlPolicy) Return { + return device.SetFanControlPolicy(fan, policy) +} + +func (device nvmlDevice) SetFanControlPolicy(fan int, policy FanControlPolicy) Return { + return nvmlDeviceSetFanControlPolicy(device, uint32(fan), policy) +} + +// nvml.DeviceClearFieldValues() +func (l *library) DeviceClearFieldValues(device Device, values []FieldValue) Return { + return device.ClearFieldValues(values) +} + +func (device nvmlDevice) ClearFieldValues(values []FieldValue) Return { + 
valuesCount := len(values) + return nvmlDeviceClearFieldValues(device, int32(valuesCount), &values[0]) +} + +// nvml.DeviceGetVgpuCapabilities() +func (l *library) DeviceGetVgpuCapabilities(device Device, capability DeviceVgpuCapability) (bool, Return) { + return device.GetVgpuCapabilities(capability) +} + +func (device nvmlDevice) GetVgpuCapabilities(capability DeviceVgpuCapability) (bool, Return) { + var capResult uint32 + ret := nvmlDeviceGetVgpuCapabilities(device, capability, &capResult) + return (capResult != 0), ret +} + +// nvml.DeviceGetVgpuSchedulerLog() +func (l *library) DeviceGetVgpuSchedulerLog(device Device) (VgpuSchedulerLog, Return) { + return device.GetVgpuSchedulerLog() +} + +func (device nvmlDevice) GetVgpuSchedulerLog() (VgpuSchedulerLog, Return) { + var pSchedulerLog VgpuSchedulerLog + ret := nvmlDeviceGetVgpuSchedulerLog(device, &pSchedulerLog) + return pSchedulerLog, ret +} + +// nvml.DeviceGetVgpuSchedulerState() +func (l *library) DeviceGetVgpuSchedulerState(device Device) (VgpuSchedulerGetState, Return) { + return device.GetVgpuSchedulerState() +} + +func (device nvmlDevice) GetVgpuSchedulerState() (VgpuSchedulerGetState, Return) { + var pSchedulerState VgpuSchedulerGetState + ret := nvmlDeviceGetVgpuSchedulerState(device, &pSchedulerState) + return pSchedulerState, ret +} + +// nvml.DeviceSetVgpuSchedulerState() +func (l *library) DeviceSetVgpuSchedulerState(device Device, pSchedulerState *VgpuSchedulerSetState) Return { + return device.SetVgpuSchedulerState(pSchedulerState) +} + +func (device nvmlDevice) SetVgpuSchedulerState(pSchedulerState *VgpuSchedulerSetState) Return { + return nvmlDeviceSetVgpuSchedulerState(device, pSchedulerState) +} + +// nvml.DeviceGetVgpuSchedulerCapabilities() +func (l *library) DeviceGetVgpuSchedulerCapabilities(device Device) (VgpuSchedulerCapabilities, Return) { + return device.GetVgpuSchedulerCapabilities() +} + +func (device nvmlDevice) GetVgpuSchedulerCapabilities() (VgpuSchedulerCapabilities, Return) { + var pCapabilities VgpuSchedulerCapabilities + ret := nvmlDeviceGetVgpuSchedulerCapabilities(device, &pCapabilities) + return pCapabilities, ret +} + +// nvml.GpuInstanceGetComputeInstancePossiblePlacements() +func (l *library) GpuInstanceGetComputeInstancePossiblePlacements(gpuInstance GpuInstance, info *ComputeInstanceProfileInfo) ([]ComputeInstancePlacement, Return) { + return gpuInstance.GetComputeInstancePossiblePlacements(info) +} + +func (gpuInstance nvmlGpuInstance) GetComputeInstancePossiblePlacements(info *ComputeInstanceProfileInfo) ([]ComputeInstancePlacement, Return) { + var count uint32 + ret := nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpuInstance, info.Id, nil, &count) + if ret != SUCCESS { + return nil, ret + } + if count == 0 { + return []ComputeInstancePlacement{}, ret + } + placementArray := make([]ComputeInstancePlacement, count) + ret = nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpuInstance, info.Id, &placementArray[0], &count) + return placementArray, ret +} + +// nvml.GpuInstanceCreateComputeInstanceWithPlacement() +func (l *library) GpuInstanceCreateComputeInstanceWithPlacement(gpuInstance GpuInstance, info *ComputeInstanceProfileInfo, placement *ComputeInstancePlacement) (ComputeInstance, Return) { + return gpuInstance.CreateComputeInstanceWithPlacement(info, placement) +} + +func (gpuInstance nvmlGpuInstance) CreateComputeInstanceWithPlacement(info *ComputeInstanceProfileInfo, placement *ComputeInstancePlacement) (ComputeInstance, Return) { + var computeInstance nvmlComputeInstance 
+ ret := nvmlGpuInstanceCreateComputeInstanceWithPlacement(gpuInstance, info.Id, placement, &computeInstance) + return computeInstance, ret +} + +// nvml.DeviceGetGpuFabricInfo() +func (l *library) DeviceGetGpuFabricInfo(device Device) (GpuFabricInfo, Return) { + return device.GetGpuFabricInfo() +} + +func (device nvmlDevice) GetGpuFabricInfo() (GpuFabricInfo, Return) { + var gpuFabricInfo GpuFabricInfo + ret := nvmlDeviceGetGpuFabricInfo(device, &gpuFabricInfo) + return gpuFabricInfo, ret +} + +// nvml.DeviceSetNvLinkDeviceLowPowerThreshold() +func (l *library) DeviceSetNvLinkDeviceLowPowerThreshold(device Device, info *NvLinkPowerThres) Return { + return device.SetNvLinkDeviceLowPowerThreshold(info) +} + +func (device nvmlDevice) SetNvLinkDeviceLowPowerThreshold(info *NvLinkPowerThres) Return { + return nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, info) +} + +// nvml.DeviceGetModuleId() +func (l *library) DeviceGetModuleId(device Device) (int, Return) { + return device.GetModuleId() +} + +func (device nvmlDevice) GetModuleId() (int, Return) { + var moduleID uint32 + ret := nvmlDeviceGetModuleId(device, &moduleID) + return int(moduleID), ret +} + +// nvml.DeviceGetCurrentClocksEventReasons() +func (l *library) DeviceGetCurrentClocksEventReasons(device Device) (uint64, Return) { + return device.GetCurrentClocksEventReasons() +} + +func (device nvmlDevice) GetCurrentClocksEventReasons() (uint64, Return) { + var clocksEventReasons uint64 + ret := nvmlDeviceGetCurrentClocksEventReasons(device, &clocksEventReasons) + return clocksEventReasons, ret +} + +// nvml.DeviceGetSupportedClocksEventReasons() +func (l *library) DeviceGetSupportedClocksEventReasons(device Device) (uint64, Return) { + return device.GetSupportedClocksEventReasons() +} + +func (device nvmlDevice) GetSupportedClocksEventReasons() (uint64, Return) { + var supportedClocksEventReasons uint64 + ret := nvmlDeviceGetSupportedClocksEventReasons(device, &supportedClocksEventReasons) + return supportedClocksEventReasons, ret +} + +// nvml.DeviceGetJpgUtilization() +func (l *library) DeviceGetJpgUtilization(device Device) (uint32, uint32, Return) { + return device.GetJpgUtilization() +} + +func (device nvmlDevice) GetJpgUtilization() (uint32, uint32, Return) { + var utilization, samplingPeriodUs uint32 + ret := nvmlDeviceGetJpgUtilization(device, &utilization, &samplingPeriodUs) + return utilization, samplingPeriodUs, ret +} + +// nvml.DeviceGetOfaUtilization() +func (l *library) DeviceGetOfaUtilization(device Device) (uint32, uint32, Return) { + return device.GetOfaUtilization() +} + +func (device nvmlDevice) GetOfaUtilization() (uint32, uint32, Return) { + var utilization, samplingPeriodUs uint32 + ret := nvmlDeviceGetOfaUtilization(device, &utilization, &samplingPeriodUs) + return utilization, samplingPeriodUs, ret +} + +// nvml.DeviceGetRunningProcessDetailList() +func (l *library) DeviceGetRunningProcessDetailList(device Device) (ProcessDetailList, Return) { + return device.GetRunningProcessDetailList() +} + +func (device nvmlDevice) GetRunningProcessDetailList() (ProcessDetailList, Return) { + var plist ProcessDetailList + ret := nvmlDeviceGetRunningProcessDetailList(device, &plist) + return plist, ret +} + +// nvml.DeviceGetConfComputeMemSizeInfo() +func (l *library) DeviceGetConfComputeMemSizeInfo(device Device) (ConfComputeMemSizeInfo, Return) { + return device.GetConfComputeMemSizeInfo() +} + +func (device nvmlDevice) GetConfComputeMemSizeInfo() (ConfComputeMemSizeInfo, Return) { + var memInfo ConfComputeMemSizeInfo + 
ret := nvmlDeviceGetConfComputeMemSizeInfo(device, &memInfo) + return memInfo, ret +} + +// nvml.DeviceGetConfComputeProtectedMemoryUsage() +func (l *library) DeviceGetConfComputeProtectedMemoryUsage(device Device) (Memory, Return) { + return device.GetConfComputeProtectedMemoryUsage() +} + +func (device nvmlDevice) GetConfComputeProtectedMemoryUsage() (Memory, Return) { + var memory Memory + ret := nvmlDeviceGetConfComputeProtectedMemoryUsage(device, &memory) + return memory, ret +} + +// nvml.DeviceGetConfComputeGpuCertificate() +func (l *library) DeviceGetConfComputeGpuCertificate(device Device) (ConfComputeGpuCertificate, Return) { + return device.GetConfComputeGpuCertificate() +} + +func (device nvmlDevice) GetConfComputeGpuCertificate() (ConfComputeGpuCertificate, Return) { + var gpuCert ConfComputeGpuCertificate + ret := nvmlDeviceGetConfComputeGpuCertificate(device, &gpuCert) + return gpuCert, ret +} + +// nvml.DeviceGetConfComputeGpuAttestationReport() +func (l *library) DeviceGetConfComputeGpuAttestationReport(device Device) (ConfComputeGpuAttestationReport, Return) { + return device.GetConfComputeGpuAttestationReport() +} + +func (device nvmlDevice) GetConfComputeGpuAttestationReport() (ConfComputeGpuAttestationReport, Return) { + var gpuAtstReport ConfComputeGpuAttestationReport + ret := nvmlDeviceGetConfComputeGpuAttestationReport(device, &gpuAtstReport) + return gpuAtstReport, ret +} + +// nvml.DeviceSetConfComputeUnprotectedMemSize() +func (l *library) DeviceSetConfComputeUnprotectedMemSize(device Device, sizeKiB uint64) Return { + return device.SetConfComputeUnprotectedMemSize(sizeKiB) +} + +func (device nvmlDevice) SetConfComputeUnprotectedMemSize(sizeKiB uint64) Return { + return nvmlDeviceSetConfComputeUnprotectedMemSize(device, sizeKiB) +} + +// nvml.DeviceSetPowerManagementLimit_v2() +func (l *library) DeviceSetPowerManagementLimit_v2(device Device, powerValue *PowerValue_v2) Return { + return device.SetPowerManagementLimit_v2(powerValue) +} + +func (device nvmlDevice) SetPowerManagementLimit_v2(powerValue *PowerValue_v2) Return { + return nvmlDeviceSetPowerManagementLimit_v2(device, powerValue) +} + +// nvml.DeviceGetC2cModeInfoV() +type C2cModeInfoHandler struct { + device nvmlDevice +} + +func (handler C2cModeInfoHandler) V1() (C2cModeInfo_v1, Return) { + var c2cModeInfo C2cModeInfo_v1 + ret := nvmlDeviceGetC2cModeInfoV(handler.device, &c2cModeInfo) + return c2cModeInfo, ret +} + +func (l *library) DeviceGetC2cModeInfoV(device Device) C2cModeInfoHandler { + return device.GetC2cModeInfoV() +} + +func (device nvmlDevice) GetC2cModeInfoV() C2cModeInfoHandler { + return C2cModeInfoHandler{device} +} + +// nvml.DeviceGetLastBBXFlushTime() +func (l *library) DeviceGetLastBBXFlushTime(device Device) (uint64, uint, Return) { + return device.GetLastBBXFlushTime() +} + +func (device nvmlDevice) GetLastBBXFlushTime() (uint64, uint, Return) { + var timestamp uint64 + var durationUs uint + ret := nvmlDeviceGetLastBBXFlushTime(device, &timestamp, &durationUs) + return timestamp, durationUs, ret +} + +// nvml.DeviceGetNumaNodeId() +func (l *library) DeviceGetNumaNodeId(device Device) (int, Return) { + return device.GetNumaNodeId() +} + +func (device nvmlDevice) GetNumaNodeId() (int, Return) { + var node uint32 + ret := nvmlDeviceGetNumaNodeId(device, &node) + return int(node), ret +} + +// nvml.DeviceGetPciInfoExt() +func (l *library) DeviceGetPciInfoExt(device Device) (PciInfoExt, Return) { + return device.GetPciInfoExt() +} + +func (device nvmlDevice) GetPciInfoExt() (PciInfoExt, 
Return) { + var pciInfo PciInfoExt + ret := nvmlDeviceGetPciInfoExt(device, &pciInfo) + return pciInfo, ret +} + +// nvml.DeviceGetGpuFabricInfoV() +type GpuFabricInfoHandler struct { + device nvmlDevice +} + +func (handler GpuFabricInfoHandler) V1() (GpuFabricInfo, Return) { + return handler.device.GetGpuFabricInfo() +} + +func (handler GpuFabricInfoHandler) V2() (GpuFabricInfo_v2, Return) { + var info GpuFabricInfoV + info.Version = STRUCT_VERSION(info, 2) + ret := nvmlDeviceGetGpuFabricInfoV(handler.device, &info) + return GpuFabricInfo_v2(info), ret +} + +func (l *library) DeviceGetGpuFabricInfoV(device Device) GpuFabricInfoHandler { + return device.GetGpuFabricInfoV() +} + +func (device nvmlDevice) GetGpuFabricInfoV() GpuFabricInfoHandler { + return GpuFabricInfoHandler{device} +} + +// nvml.DeviceGetProcessesUtilizationInfo() +func (l *library) DeviceGetProcessesUtilizationInfo(device Device) (ProcessesUtilizationInfo, Return) { + return device.GetProcessesUtilizationInfo() +} + +func (device nvmlDevice) GetProcessesUtilizationInfo() (ProcessesUtilizationInfo, Return) { + var processesUtilInfo ProcessesUtilizationInfo + ret := nvmlDeviceGetProcessesUtilizationInfo(device, &processesUtilInfo) + return processesUtilInfo, ret +} + +// nvml.DeviceGetVgpuHeterogeneousMode() +func (l *library) DeviceGetVgpuHeterogeneousMode(device Device) (VgpuHeterogeneousMode, Return) { + return device.GetVgpuHeterogeneousMode() +} + +func (device nvmlDevice) GetVgpuHeterogeneousMode() (VgpuHeterogeneousMode, Return) { + var heterogeneousMode VgpuHeterogeneousMode + ret := nvmlDeviceGetVgpuHeterogeneousMode(device, &heterogeneousMode) + return heterogeneousMode, ret +} + +// nvml.DeviceSetVgpuHeterogeneousMode() +func (l *library) DeviceSetVgpuHeterogeneousMode(device Device, heterogeneousMode VgpuHeterogeneousMode) Return { + return device.SetVgpuHeterogeneousMode(heterogeneousMode) +} + +func (device nvmlDevice) SetVgpuHeterogeneousMode(heterogeneousMode VgpuHeterogeneousMode) Return { + ret := nvmlDeviceSetVgpuHeterogeneousMode(device, &heterogeneousMode) + return ret +} + +// nvml.DeviceGetVgpuTypeSupportedPlacements() +func (l *library) DeviceGetVgpuTypeSupportedPlacements(device Device, vgpuTypeId VgpuTypeId) (VgpuPlacementList, Return) { + return device.GetVgpuTypeSupportedPlacements(vgpuTypeId) +} + +func (device nvmlDevice) GetVgpuTypeSupportedPlacements(vgpuTypeId VgpuTypeId) (VgpuPlacementList, Return) { + return vgpuTypeId.GetSupportedPlacements(device) +} + +func (vgpuTypeId nvmlVgpuTypeId) GetSupportedPlacements(device Device) (VgpuPlacementList, Return) { + var placementList VgpuPlacementList + ret := nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDeviceHandle(device), vgpuTypeId, &placementList) + return placementList, ret +} + +// nvml.DeviceGetVgpuTypeCreatablePlacements() +func (l *library) DeviceGetVgpuTypeCreatablePlacements(device Device, vgpuTypeId VgpuTypeId) (VgpuPlacementList, Return) { + return device.GetVgpuTypeCreatablePlacements(vgpuTypeId) +} + +func (device nvmlDevice) GetVgpuTypeCreatablePlacements(vgpuTypeId VgpuTypeId) (VgpuPlacementList, Return) { + return vgpuTypeId.GetCreatablePlacements(device) +} + +func (vgpuTypeId nvmlVgpuTypeId) GetCreatablePlacements(device Device) (VgpuPlacementList, Return) { + var placementList VgpuPlacementList + ret := nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDeviceHandle(device), vgpuTypeId, &placementList) + return placementList, ret +} + +// nvml.DeviceSetVgpuCapabilities() +func (l *library) DeviceSetVgpuCapabilities(device Device, 
capability DeviceVgpuCapability, state EnableState) Return { + return device.SetVgpuCapabilities(capability, state) +} + +func (device nvmlDevice) SetVgpuCapabilities(capability DeviceVgpuCapability, state EnableState) Return { + ret := nvmlDeviceSetVgpuCapabilities(device, capability, state) + return ret +} + +// nvml.DeviceGetVgpuInstancesUtilizationInfo() +func (l *library) DeviceGetVgpuInstancesUtilizationInfo(device Device) (VgpuInstancesUtilizationInfo, Return) { + return device.GetVgpuInstancesUtilizationInfo() +} + +func (device nvmlDevice) GetVgpuInstancesUtilizationInfo() (VgpuInstancesUtilizationInfo, Return) { + var vgpuUtilInfo VgpuInstancesUtilizationInfo + ret := nvmlDeviceGetVgpuInstancesUtilizationInfo(device, &vgpuUtilInfo) + return vgpuUtilInfo, ret +} + +// nvml.DeviceGetVgpuProcessesUtilizationInfo() +func (l *library) DeviceGetVgpuProcessesUtilizationInfo(device Device) (VgpuProcessesUtilizationInfo, Return) { + return device.GetVgpuProcessesUtilizationInfo() +} + +func (device nvmlDevice) GetVgpuProcessesUtilizationInfo() (VgpuProcessesUtilizationInfo, Return) { + var vgpuProcUtilInfo VgpuProcessesUtilizationInfo + ret := nvmlDeviceGetVgpuProcessesUtilizationInfo(device, &vgpuProcUtilInfo) + return vgpuProcUtilInfo, ret +} + +// nvml.DeviceGetSramEccErrorStatus() +func (l *library) DeviceGetSramEccErrorStatus(device Device) (EccSramErrorStatus, Return) { + return device.GetSramEccErrorStatus() +} + +func (device nvmlDevice) GetSramEccErrorStatus() (EccSramErrorStatus, Return) { + var status EccSramErrorStatus + ret := nvmlDeviceGetSramEccErrorStatus(device, &status) + return status, ret +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go new file mode 100644 index 00000000000..c2ce2e37e0a --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go @@ -0,0 +1,21 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. +// Code generated by https://git.io/c-for-go. DO NOT EDIT. + +/* +Package NVML bindings +*/ +package nvml diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/dynamicLibrary_mock.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/dynamicLibrary_mock.go new file mode 100644 index 00000000000..b785431c72a --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/dynamicLibrary_mock.go @@ -0,0 +1,157 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package nvml + +import ( + "sync" +) + +// Ensure, that dynamicLibraryMock does implement dynamicLibrary. +// If this is not the case, regenerate this file with moq. +var _ dynamicLibrary = &dynamicLibraryMock{} + +// dynamicLibraryMock is a mock implementation of dynamicLibrary. 
+// +// func TestSomethingThatUsesdynamicLibrary(t *testing.T) { +// +// // make and configure a mocked dynamicLibrary +// mockeddynamicLibrary := &dynamicLibraryMock{ +// CloseFunc: func() error { +// panic("mock out the Close method") +// }, +// LookupFunc: func(s string) error { +// panic("mock out the Lookup method") +// }, +// OpenFunc: func() error { +// panic("mock out the Open method") +// }, +// } +// +// // use mockeddynamicLibrary in code that requires dynamicLibrary +// // and then make assertions. +// +// } +type dynamicLibraryMock struct { + // CloseFunc mocks the Close method. + CloseFunc func() error + + // LookupFunc mocks the Lookup method. + LookupFunc func(s string) error + + // OpenFunc mocks the Open method. + OpenFunc func() error + + // calls tracks calls to the methods. + calls struct { + // Close holds details about calls to the Close method. + Close []struct { + } + // Lookup holds details about calls to the Lookup method. + Lookup []struct { + // S is the s argument value. + S string + } + // Open holds details about calls to the Open method. + Open []struct { + } + } + lockClose sync.RWMutex + lockLookup sync.RWMutex + lockOpen sync.RWMutex +} + +// Close calls CloseFunc. +func (mock *dynamicLibraryMock) Close() error { + callInfo := struct { + }{} + mock.lockClose.Lock() + mock.calls.Close = append(mock.calls.Close, callInfo) + mock.lockClose.Unlock() + if mock.CloseFunc == nil { + var ( + errOut error + ) + return errOut + } + return mock.CloseFunc() +} + +// CloseCalls gets all the calls that were made to Close. +// Check the length with: +// +// len(mockeddynamicLibrary.CloseCalls()) +func (mock *dynamicLibraryMock) CloseCalls() []struct { +} { + var calls []struct { + } + mock.lockClose.RLock() + calls = mock.calls.Close + mock.lockClose.RUnlock() + return calls +} + +// Lookup calls LookupFunc. +func (mock *dynamicLibraryMock) Lookup(s string) error { + callInfo := struct { + S string + }{ + S: s, + } + mock.lockLookup.Lock() + mock.calls.Lookup = append(mock.calls.Lookup, callInfo) + mock.lockLookup.Unlock() + if mock.LookupFunc == nil { + var ( + errOut error + ) + return errOut + } + return mock.LookupFunc(s) +} + +// LookupCalls gets all the calls that were made to Lookup. +// Check the length with: +// +// len(mockeddynamicLibrary.LookupCalls()) +func (mock *dynamicLibraryMock) LookupCalls() []struct { + S string +} { + var calls []struct { + S string + } + mock.lockLookup.RLock() + calls = mock.calls.Lookup + mock.lockLookup.RUnlock() + return calls +} + +// Open calls OpenFunc. +func (mock *dynamicLibraryMock) Open() error { + callInfo := struct { + }{} + mock.lockOpen.Lock() + mock.calls.Open = append(mock.calls.Open, callInfo) + mock.lockOpen.Unlock() + if mock.OpenFunc == nil { + var ( + errOut error + ) + return errOut + } + return mock.OpenFunc() +} + +// OpenCalls gets all the calls that were made to Open. +// Check the length with: +// +// len(mockeddynamicLibrary.OpenCalls()) +func (mock *dynamicLibraryMock) OpenCalls() []struct { +} { + var calls []struct { + } + mock.lockOpen.RLock() + calls = mock.calls.Open + mock.lockOpen.RUnlock() + return calls +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go new file mode 100644 index 00000000000..933b4deadcf --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go @@ -0,0 +1,73 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +// EventData includes an interface type for Device instead of nvmlDevice +type EventData struct { + Device Device + EventType uint64 + EventData uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +func (e EventData) convert() nvmlEventData { + out := nvmlEventData{ + Device: e.Device.(nvmlDevice), + EventType: e.EventType, + EventData: e.EventData, + GpuInstanceId: e.GpuInstanceId, + ComputeInstanceId: e.ComputeInstanceId, + } + return out +} + +func (e nvmlEventData) convert() EventData { + out := EventData{ + Device: e.Device, + EventType: e.EventType, + EventData: e.EventData, + GpuInstanceId: e.GpuInstanceId, + ComputeInstanceId: e.ComputeInstanceId, + } + return out +} + +// nvml.EventSetCreate() +func (l *library) EventSetCreate() (EventSet, Return) { + var Set nvmlEventSet + ret := nvmlEventSetCreate(&Set) + return Set, ret +} + +// nvml.EventSetWait() +func (l *library) EventSetWait(set EventSet, timeoutms uint32) (EventData, Return) { + return set.Wait(timeoutms) +} + +func (set nvmlEventSet) Wait(timeoutms uint32) (EventData, Return) { + var data nvmlEventData + ret := nvmlEventSetWait(set, &data, timeoutms) + return data.convert(), ret +} + +// nvml.EventSetFree() +func (l *library) EventSetFree(set EventSet) Return { + return set.Free() +} + +func (set nvmlEventSet) Free() Return { + return nvmlEventSetFree(set) +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go new file mode 100644 index 00000000000..7f8995cc71b --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go @@ -0,0 +1,172 @@ +// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
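// Illustrative sketch, not part of the patch or the vendored go-nvml sources:
// one way a caller could drive the EventSet helpers defined in event_set.go
// above. The function name and the 1000ms timeout are arbitrary choices for
// this example; a real caller would also register devices on the set (via the
// library's DeviceRegisterEvents entry point) before waiting.
func waitForOneEvent() (EventData, Return) {
	set, ret := libnvml.EventSetCreate()
	if ret != SUCCESS {
		return EventData{}, ret
	}
	// Free the set when done so the underlying NVML handle is released.
	defer libnvml.EventSetFree(set)
	// Block for at most one second waiting for an event on the set.
	return libnvml.EventSetWait(set, 1000)
}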
+ +package nvml + +// GpmMetricsGetType includes interface types for GpmSample instead of nvmlGpmSample +type GpmMetricsGetType struct { + Version uint32 + NumMetrics uint32 + Sample1 GpmSample + Sample2 GpmSample + Metrics [98]GpmMetric +} + +func (g *GpmMetricsGetType) convert() *nvmlGpmMetricsGetType { + out := &nvmlGpmMetricsGetType{ + Version: g.Version, + NumMetrics: g.NumMetrics, + Sample1: g.Sample1.(nvmlGpmSample), + Sample2: g.Sample2.(nvmlGpmSample), + } + for i := range g.Metrics { + out.Metrics[i] = g.Metrics[i] + } + return out +} + +func (g *nvmlGpmMetricsGetType) convert() *GpmMetricsGetType { + out := &GpmMetricsGetType{ + Version: g.Version, + NumMetrics: g.NumMetrics, + Sample1: g.Sample1, + Sample2: g.Sample2, + } + for i := range g.Metrics { + out.Metrics[i] = g.Metrics[i] + } + return out +} + +// nvml.GpmMetricsGet() +type GpmMetricsGetVType struct { + metricsGet *GpmMetricsGetType +} + +func (l *library) GpmMetricsGetV(metricsGet *GpmMetricsGetType) GpmMetricsGetVType { + return GpmMetricsGetVType{metricsGet} +} + +// nvmlGpmMetricsGetStub is a stub function that can be overridden for testing. +var nvmlGpmMetricsGetStub = nvmlGpmMetricsGet + +func (metricsGetV GpmMetricsGetVType) V1() Return { + metricsGetV.metricsGet.Version = 1 + return gpmMetricsGet(metricsGetV.metricsGet) +} + +func (l *library) GpmMetricsGet(metricsGet *GpmMetricsGetType) Return { + metricsGet.Version = GPM_METRICS_GET_VERSION + return gpmMetricsGet(metricsGet) +} + +func gpmMetricsGet(metricsGet *GpmMetricsGetType) Return { + nvmlMetricsGet := metricsGet.convert() + ret := nvmlGpmMetricsGetStub(nvmlMetricsGet) + *metricsGet = *nvmlMetricsGet.convert() + return ret +} + +// nvml.GpmSampleFree() +func (l *library) GpmSampleFree(gpmSample GpmSample) Return { + return gpmSample.Free() +} + +func (gpmSample nvmlGpmSample) Free() Return { + return nvmlGpmSampleFree(gpmSample) +} + +// nvml.GpmSampleAlloc() +func (l *library) GpmSampleAlloc() (GpmSample, Return) { + var gpmSample nvmlGpmSample + ret := nvmlGpmSampleAlloc(&gpmSample) + return gpmSample, ret +} + +// nvml.GpmSampleGet() +func (l *library) GpmSampleGet(device Device, gpmSample GpmSample) Return { + return gpmSample.Get(device) +} + +func (device nvmlDevice) GpmSampleGet(gpmSample GpmSample) Return { + return gpmSample.Get(device) +} + +func (gpmSample nvmlGpmSample) Get(device Device) Return { + return nvmlGpmSampleGet(nvmlDeviceHandle(device), gpmSample) +} + +// nvml.GpmQueryDeviceSupport() +type GpmSupportV struct { + device nvmlDevice +} + +func (l *library) GpmQueryDeviceSupportV(device Device) GpmSupportV { + return device.GpmQueryDeviceSupportV() +} + +func (device nvmlDevice) GpmQueryDeviceSupportV() GpmSupportV { + return GpmSupportV{device} +} + +func (gpmSupportV GpmSupportV) V1() (GpmSupport, Return) { + var gpmSupport GpmSupport + gpmSupport.Version = 1 + ret := nvmlGpmQueryDeviceSupport(gpmSupportV.device, &gpmSupport) + return gpmSupport, ret +} + +func (l *library) GpmQueryDeviceSupport(device Device) (GpmSupport, Return) { + return device.GpmQueryDeviceSupport() +} + +func (device nvmlDevice) GpmQueryDeviceSupport() (GpmSupport, Return) { + var gpmSupport GpmSupport + gpmSupport.Version = GPM_SUPPORT_VERSION + ret := nvmlGpmQueryDeviceSupport(device, &gpmSupport) + return gpmSupport, ret +} + +// nvml.GpmMigSampleGet() +func (l *library) GpmMigSampleGet(device Device, gpuInstanceId int, gpmSample GpmSample) Return { + return gpmSample.MigGet(device, gpuInstanceId) +} + +func (device nvmlDevice) 
GpmMigSampleGet(gpuInstanceId int, gpmSample GpmSample) Return { + return gpmSample.MigGet(device, gpuInstanceId) +} + +func (gpmSample nvmlGpmSample) MigGet(device Device, gpuInstanceId int) Return { + return nvmlGpmMigSampleGet(nvmlDeviceHandle(device), uint32(gpuInstanceId), gpmSample) +} + +// nvml.GpmQueryIfStreamingEnabled() +func (l *library) GpmQueryIfStreamingEnabled(device Device) (uint32, Return) { + return device.GpmQueryIfStreamingEnabled() +} + +func (device nvmlDevice) GpmQueryIfStreamingEnabled() (uint32, Return) { + var state uint32 + ret := nvmlGpmQueryIfStreamingEnabled(device, &state) + return state, ret +} + +// nvml.GpmSetStreamingEnabled() +func (l *library) GpmSetStreamingEnabled(device Device, state uint32) Return { + return device.GpmSetStreamingEnabled(state) +} + +func (device nvmlDevice) GpmSetStreamingEnabled(state uint32) Return { + return nvmlGpmSetStreamingEnabled(device, state) +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go new file mode 100644 index 00000000000..06e64441930 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go @@ -0,0 +1,48 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import "C" + +// nvml.Init() +func (l *library) Init() Return { + if err := l.load(); err != nil { + return ERROR_LIBRARY_NOT_FOUND + } + return nvmlInit() +} + +// nvml.InitWithFlags() +func (l *library) InitWithFlags(flags uint32) Return { + if err := l.load(); err != nil { + return ERROR_LIBRARY_NOT_FOUND + } + return nvmlInitWithFlags(flags) +} + +// nvml.Shutdown() +func (l *library) Shutdown() Return { + ret := nvmlShutdown() + if ret != SUCCESS { + return ret + } + + err := l.close() + if err != nil { + return ERROR_UNKNOWN + } + + return ret +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/lib.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/lib.go new file mode 100644 index 00000000000..bc4c3de5ef3 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/lib.go @@ -0,0 +1,291 @@ +/** +# Copyright 2023 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+**/ + +package nvml + +import ( + "errors" + "fmt" + "sync" + + "github.com/NVIDIA/go-nvml/pkg/dl" +) + +import "C" + +const ( + defaultNvmlLibraryName = "libnvidia-ml.so.1" + defaultNvmlLibraryLoadFlags = dl.RTLD_LAZY | dl.RTLD_GLOBAL +) + +var errLibraryNotLoaded = errors.New("library not loaded") +var errLibraryAlreadyLoaded = errors.New("library already loaded") + +// dynamicLibrary is an interface for abstacting the underlying library. +// This also allows for mocking and testing. + +//go:generate moq -stub -out dynamicLibrary_mock.go . dynamicLibrary +type dynamicLibrary interface { + Lookup(string) error + Open() error + Close() error +} + +// library represents an nvml library. +// This includes a reference to the underlying DynamicLibrary +type library struct { + sync.Mutex + path string + refcount refcount + dl dynamicLibrary +} + +var _ Interface = (*library)(nil) + +// libnvml is a global instance of the nvml library. +var libnvml = newLibrary() + +func New(opts ...LibraryOption) Interface { + return newLibrary(opts...) +} + +func newLibrary(opts ...LibraryOption) *library { + l := &library{} + l.init(opts...) + return l +} + +func (l *library) init(opts ...LibraryOption) { + o := libraryOptions{} + for _, opt := range opts { + opt(&o) + } + + if o.path == "" { + o.path = defaultNvmlLibraryName + } + if o.flags == 0 { + o.flags = defaultNvmlLibraryLoadFlags + } + + l.path = o.path + l.dl = dl.New(o.path, o.flags) +} + +func (l *library) Extensions() ExtendedInterface { + return l +} + +// LookupSymbol checks whether the specified library symbol exists in the library. +// Note that this requires that the library be loaded. +func (l *library) LookupSymbol(name string) error { + if l == nil || l.refcount == 0 { + return fmt.Errorf("error looking up %s: %w", name, errLibraryNotLoaded) + } + return l.dl.Lookup(name) +} + +// load initializes the library and updates the versioned symbols. +// Multiple calls to an already loaded library will return without error. +func (l *library) load() (rerr error) { + l.Lock() + defer l.Unlock() + + defer func() { l.refcount.IncOnNoError(rerr) }() + if l.refcount > 0 { + return nil + } + + if err := l.dl.Open(); err != nil { + return fmt.Errorf("error opening %s: %w", l.path, err) + } + + // Update the errorStringFunc to point to nvml.ErrorString + errorStringFunc = nvmlErrorString + + // Update all versioned symbols + l.updateVersionedSymbols() + + return nil +} + +// close the underlying library and ensure that the global pointer to the +// library is set to nil to ensure that subsequent calls to open will reinitialize it. +// Multiple calls to an already closed nvml library will return without error. 
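// For illustration only (an editorial sketch, not part of the vendored file):
// because load and close are reference counted, nested Init/Shutdown pairs
// only open and close the underlying dynamic library once:
//
//	_ = libnvml.Init()     // refcount 0 -> 1, dlopen of libnvidia-ml.so.1
//	_ = libnvml.Init()     // refcount 1 -> 2, library already loaded
//	_ = libnvml.Shutdown() // refcount 2 -> 1, library stays loaded
//	_ = libnvml.Shutdown() // refcount 1 -> 0, dlclose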
+func (l *library) close() (rerr error) { + l.Lock() + defer l.Unlock() + + defer func() { l.refcount.DecOnNoError(rerr) }() + if l.refcount != 1 { + return nil + } + + if err := l.dl.Close(); err != nil { + return fmt.Errorf("error closing %s: %w", l.path, err) + } + + // Update the errorStringFunc to point to defaultErrorStringFunc + errorStringFunc = defaultErrorStringFunc + + return nil +} + +// Default all versioned APIs to v1 (to infer the types) +var nvmlInit = nvmlInit_v1 +var nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v1 +var nvmlDeviceGetCount = nvmlDeviceGetCount_v1 +var nvmlDeviceGetHandleByIndex = nvmlDeviceGetHandleByIndex_v1 +var nvmlDeviceGetHandleByPciBusId = nvmlDeviceGetHandleByPciBusId_v1 +var nvmlDeviceGetNvLinkRemotePciInfo = nvmlDeviceGetNvLinkRemotePciInfo_v1 +var nvmlDeviceRemoveGpu = nvmlDeviceRemoveGpu_v1 +var nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v1 +var nvmlEventSetWait = nvmlEventSetWait_v1 +var nvmlDeviceGetAttributes = nvmlDeviceGetAttributes_v1 +var nvmlComputeInstanceGetInfo = nvmlComputeInstanceGetInfo_v1 +var deviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v1 +var deviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v1 +var deviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v1 +var GetBlacklistDeviceCount = GetExcludedDeviceCount +var GetBlacklistDeviceInfoByIndex = GetExcludedDeviceInfoByIndex +var nvmlDeviceGetGpuInstancePossiblePlacements = nvmlDeviceGetGpuInstancePossiblePlacements_v1 +var nvmlVgpuInstanceGetLicenseInfo = nvmlVgpuInstanceGetLicenseInfo_v1 + +// BlacklistDeviceInfo was replaced by ExcludedDeviceInfo +type BlacklistDeviceInfo = ExcludedDeviceInfo + +type ProcessInfo_v1Slice []ProcessInfo_v1 +type ProcessInfo_v2Slice []ProcessInfo_v2 + +func (pis ProcessInfo_v1Slice) ToProcessInfoSlice() []ProcessInfo { + var newInfos []ProcessInfo + for _, pi := range pis { + info := ProcessInfo{ + Pid: pi.Pid, + UsedGpuMemory: pi.UsedGpuMemory, + GpuInstanceId: 0xFFFFFFFF, // GPU instance ID is invalid in v1 + ComputeInstanceId: 0xFFFFFFFF, // Compute instance ID is invalid in v1 + } + newInfos = append(newInfos, info) + } + return newInfos +} + +func (pis ProcessInfo_v2Slice) ToProcessInfoSlice() []ProcessInfo { + var newInfos []ProcessInfo + for _, pi := range pis { + info := ProcessInfo(pi) + newInfos = append(newInfos, info) + } + return newInfos +} + +// updateVersionedSymbols checks for versioned symbols in the loaded dynamic library. +// If newer versioned symbols exist, these replace the default `v1` symbols initialized above. +// When new versioned symbols are added, these would have to be initialized above and have +// corresponding checks and subsequent assignments added below. 
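// As an editorial illustration (not part of the vendored file), wiring up a
// hypothetical future symbol "nvmlFooBar_v2" would follow the same pattern:
// default the package-level variable to the v1 implementation next to the
// declarations above, then upgrade it in updateVersionedSymbols once the
// newer symbol resolves:
//
//	var nvmlFooBar = nvmlFooBar_v1
//
//	if err := l.dl.Lookup("nvmlFooBar_v2"); err == nil {
//		nvmlFooBar = nvmlFooBar_v2
//	}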
+func (l *library) updateVersionedSymbols() { + err := l.dl.Lookup("nvmlInit_v2") + if err == nil { + nvmlInit = nvmlInit_v2 + } + err = l.dl.Lookup("nvmlDeviceGetPciInfo_v2") + if err == nil { + nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v2 + } + err = l.dl.Lookup("nvmlDeviceGetPciInfo_v3") + if err == nil { + nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v3 + } + err = l.dl.Lookup("nvmlDeviceGetCount_v2") + if err == nil { + nvmlDeviceGetCount = nvmlDeviceGetCount_v2 + } + err = l.dl.Lookup("nvmlDeviceGetHandleByIndex_v2") + if err == nil { + nvmlDeviceGetHandleByIndex = nvmlDeviceGetHandleByIndex_v2 + } + err = l.dl.Lookup("nvmlDeviceGetHandleByPciBusId_v2") + if err == nil { + nvmlDeviceGetHandleByPciBusId = nvmlDeviceGetHandleByPciBusId_v2 + } + err = l.dl.Lookup("nvmlDeviceGetNvLinkRemotePciInfo_v2") + if err == nil { + nvmlDeviceGetNvLinkRemotePciInfo = nvmlDeviceGetNvLinkRemotePciInfo_v2 + } + // Unable to overwrite nvmlDeviceRemoveGpu() because the v2 function takes + // a different set of parameters than the v1 function. + //err = l.dl.Lookup("nvmlDeviceRemoveGpu_v2") + //if err == nil { + // nvmlDeviceRemoveGpu = nvmlDeviceRemoveGpu_v2 + //} + err = l.dl.Lookup("nvmlDeviceGetGridLicensableFeatures_v2") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v2 + } + err = l.dl.Lookup("nvmlDeviceGetGridLicensableFeatures_v3") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v3 + } + err = l.dl.Lookup("nvmlDeviceGetGridLicensableFeatures_v4") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v4 + } + err = l.dl.Lookup("nvmlEventSetWait_v2") + if err == nil { + nvmlEventSetWait = nvmlEventSetWait_v2 + } + err = l.dl.Lookup("nvmlDeviceGetAttributes_v2") + if err == nil { + nvmlDeviceGetAttributes = nvmlDeviceGetAttributes_v2 + } + err = l.dl.Lookup("nvmlComputeInstanceGetInfo_v2") + if err == nil { + nvmlComputeInstanceGetInfo = nvmlComputeInstanceGetInfo_v2 + } + err = l.dl.Lookup("nvmlDeviceGetComputeRunningProcesses_v2") + if err == nil { + deviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v2 + } + err = l.dl.Lookup("nvmlDeviceGetComputeRunningProcesses_v3") + if err == nil { + deviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v3 + } + err = l.dl.Lookup("nvmlDeviceGetGraphicsRunningProcesses_v2") + if err == nil { + deviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v2 + } + err = l.dl.Lookup("nvmlDeviceGetGraphicsRunningProcesses_v3") + if err == nil { + deviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v3 + } + err = l.dl.Lookup("nvmlDeviceGetMPSComputeRunningProcesses_v2") + if err == nil { + deviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v2 + } + err = l.dl.Lookup("nvmlDeviceGetMPSComputeRunningProcesses_v3") + if err == nil { + deviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v3 + } + err = l.dl.Lookup("nvmlDeviceGetGpuInstancePossiblePlacements_v2") + if err == nil { + nvmlDeviceGetGpuInstancePossiblePlacements = nvmlDeviceGetGpuInstancePossiblePlacements_v2 + } + err = l.dl.Lookup("nvmlVgpuInstanceGetLicenseInfo_v2") + if err == nil { + nvmlVgpuInstanceGetLicenseInfo = nvmlVgpuInstanceGetLicenseInfo_v2 + } +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go new file mode 100644 index 00000000000..6ba290c5fdc --- /dev/null +++ 
b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go @@ -0,0 +1,3310 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. +// Code generated by https://git.io/c-for-go. DO NOT EDIT. + +package nvml + +/* +#cgo linux LDFLAGS: -Wl,--export-dynamic -Wl,--unresolved-symbols=ignore-in-object-files +#cgo darwin LDFLAGS: -Wl,-undefined,dynamic_lookup +#cgo CFLAGS: -DNVML_NO_UNVERSIONED_FUNC_DEFS=1 +#include "nvml.h" +#include +#include "cgo_helpers.h" +*/ +import "C" +import "unsafe" + +// nvmlInit_v2 function as declared in nvml/nvml.h +func nvmlInit_v2() Return { + __ret := C.nvmlInit_v2() + __v := (Return)(__ret) + return __v +} + +// nvmlInitWithFlags function as declared in nvml/nvml.h +func nvmlInitWithFlags(Flags uint32) Return { + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlInitWithFlags(cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlShutdown function as declared in nvml/nvml.h +func nvmlShutdown() Return { + __ret := C.nvmlShutdown() + __v := (Return)(__ret) + return __v +} + +// nvmlErrorString function as declared in nvml/nvml.h +func nvmlErrorString(Result Return) string { + cResult, _ := (C.nvmlReturn_t)(Result), cgoAllocsUnknown + __ret := C.nvmlErrorString(cResult) + __v := packPCharString(__ret) + return __v +} + +// nvmlSystemGetDriverVersion function as declared in nvml/nvml.h +func nvmlSystemGetDriverVersion(Version *byte, Length uint32) Return { + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetDriverVersion(cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetNVMLVersion function as declared in nvml/nvml.h +func nvmlSystemGetNVMLVersion(Version *byte, Length uint32) Return { + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetNVMLVersion(cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetCudaDriverVersion function as declared in nvml/nvml.h +func nvmlSystemGetCudaDriverVersion(CudaDriverVersion *int32) Return { + cCudaDriverVersion, _ := (*C.int)(unsafe.Pointer(CudaDriverVersion)), cgoAllocsUnknown + __ret := C.nvmlSystemGetCudaDriverVersion(cCudaDriverVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetCudaDriverVersion_v2 function as declared in nvml/nvml.h +func nvmlSystemGetCudaDriverVersion_v2(CudaDriverVersion *int32) Return { + cCudaDriverVersion, _ := (*C.int)(unsafe.Pointer(CudaDriverVersion)), cgoAllocsUnknown + __ret := C.nvmlSystemGetCudaDriverVersion_v2(cCudaDriverVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetProcessName function as declared in nvml/nvml.h +func nvmlSystemGetProcessName(Pid uint32, Name *byte, Length uint32) Return { + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cName, _ := 
(*C.char)(unsafe.Pointer(Name)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetProcessName(cPid, cName, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetHicVersion function as declared in nvml/nvml.h +func nvmlSystemGetHicVersion(HwbcCount *uint32, HwbcEntries *HwbcEntry) Return { + cHwbcCount, _ := (*C.uint)(unsafe.Pointer(HwbcCount)), cgoAllocsUnknown + cHwbcEntries, _ := (*C.nvmlHwbcEntry_t)(unsafe.Pointer(HwbcEntries)), cgoAllocsUnknown + __ret := C.nvmlSystemGetHicVersion(cHwbcCount, cHwbcEntries) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetTopologyGpuSet function as declared in nvml/nvml.h +func nvmlSystemGetTopologyGpuSet(CpuNumber uint32, Count *uint32, DeviceArray *nvmlDevice) Return { + cCpuNumber, _ := (C.uint)(CpuNumber), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cDeviceArray, _ := (*C.nvmlDevice_t)(unsafe.Pointer(DeviceArray)), cgoAllocsUnknown + __ret := C.nvmlSystemGetTopologyGpuSet(cCpuNumber, cCount, cDeviceArray) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetCount function as declared in nvml/nvml.h +func nvmlUnitGetCount(UnitCount *uint32) Return { + cUnitCount, _ := (*C.uint)(unsafe.Pointer(UnitCount)), cgoAllocsUnknown + __ret := C.nvmlUnitGetCount(cUnitCount) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetHandleByIndex function as declared in nvml/nvml.h +func nvmlUnitGetHandleByIndex(Index uint32, nvmlUnit *nvmlUnit) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cnvmlUnit, _ := (*C.nvmlUnit_t)(unsafe.Pointer(nvmlUnit)), cgoAllocsUnknown + __ret := C.nvmlUnitGetHandleByIndex(cIndex, cnvmlUnit) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetUnitInfo function as declared in nvml/nvml.h +func nvmlUnitGetUnitInfo(nvmlUnit nvmlUnit, Info *UnitInfo) Return { + cnvmlUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&nvmlUnit)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlUnitInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlUnitGetUnitInfo(cnvmlUnit, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetLedState function as declared in nvml/nvml.h +func nvmlUnitGetLedState(nvmlUnit nvmlUnit, State *LedState) Return { + cnvmlUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&nvmlUnit)), cgoAllocsUnknown + cState, _ := (*C.nvmlLedState_t)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlUnitGetLedState(cnvmlUnit, cState) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetPsuInfo function as declared in nvml/nvml.h +func nvmlUnitGetPsuInfo(nvmlUnit nvmlUnit, Psu *PSUInfo) Return { + cnvmlUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&nvmlUnit)), cgoAllocsUnknown + cPsu, _ := (*C.nvmlPSUInfo_t)(unsafe.Pointer(Psu)), cgoAllocsUnknown + __ret := C.nvmlUnitGetPsuInfo(cnvmlUnit, cPsu) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetTemperature function as declared in nvml/nvml.h +func nvmlUnitGetTemperature(nvmlUnit nvmlUnit, _type uint32, Temp *uint32) Return { + cnvmlUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&nvmlUnit)), cgoAllocsUnknown + c_type, _ := (C.uint)(_type), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlUnitGetTemperature(cnvmlUnit, c_type, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetFanSpeedInfo function as declared in nvml/nvml.h +func nvmlUnitGetFanSpeedInfo(nvmlUnit nvmlUnit, FanSpeeds *UnitFanSpeeds) Return { + cnvmlUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&nvmlUnit)), cgoAllocsUnknown + 
cFanSpeeds, _ := (*C.nvmlUnitFanSpeeds_t)(unsafe.Pointer(FanSpeeds)), cgoAllocsUnknown + __ret := C.nvmlUnitGetFanSpeedInfo(cnvmlUnit, cFanSpeeds) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetDevices function as declared in nvml/nvml.h +func nvmlUnitGetDevices(nvmlUnit nvmlUnit, DeviceCount *uint32, Devices *nvmlDevice) Return { + cnvmlUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&nvmlUnit)), cgoAllocsUnknown + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + cDevices, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Devices)), cgoAllocsUnknown + __ret := C.nvmlUnitGetDevices(cnvmlUnit, cDeviceCount, cDevices) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCount_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetCount_v2(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCount_v2(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAttributes_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetAttributes_v2(nvmlDevice nvmlDevice, Attributes *DeviceAttributes) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cAttributes, _ := (*C.nvmlDeviceAttributes_t)(unsafe.Pointer(Attributes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAttributes_v2(cnvmlDevice, cAttributes) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByIndex_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByIndex_v2(Index uint32, nvmlDevice *nvmlDevice) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cnvmlDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByIndex_v2(cIndex, cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleBySerial function as declared in nvml/nvml.h +func nvmlDeviceGetHandleBySerial(Serial string, nvmlDevice *nvmlDevice) Return { + cSerial, _ := unpackPCharString(Serial) + cnvmlDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleBySerial(cSerial, cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByUUID function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByUUID(Uuid string, nvmlDevice *nvmlDevice) Return { + cUuid, _ := unpackPCharString(Uuid) + cnvmlDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByUUID(cUuid, cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByPciBusId_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByPciBusId_v2(PciBusId string, nvmlDevice *nvmlDevice) Return { + cPciBusId, _ := unpackPCharString(PciBusId) + cnvmlDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByPciBusId_v2(cPciBusId, cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetName function as declared in nvml/nvml.h +func nvmlDeviceGetName(nvmlDevice nvmlDevice, Name *byte, Length uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cName, _ := (*C.char)(unsafe.Pointer(Name)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetName(cnvmlDevice, cName, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBrand function as declared in nvml/nvml.h +func nvmlDeviceGetBrand(nvmlDevice nvmlDevice, _type *BrandType) 
Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + c_type, _ := (*C.nvmlBrandType_t)(unsafe.Pointer(_type)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBrand(cnvmlDevice, c_type) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetIndex function as declared in nvml/nvml.h +func nvmlDeviceGetIndex(nvmlDevice nvmlDevice, Index *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIndex, _ := (*C.uint)(unsafe.Pointer(Index)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetIndex(cnvmlDevice, cIndex) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSerial function as declared in nvml/nvml.h +func nvmlDeviceGetSerial(nvmlDevice nvmlDevice, Serial *byte, Length uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSerial, _ := (*C.char)(unsafe.Pointer(Serial)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSerial(cnvmlDevice, cSerial, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetModuleId function as declared in nvml/nvml.h +func nvmlDeviceGetModuleId(nvmlDevice nvmlDevice, ModuleId *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cModuleId, _ := (*C.uint)(unsafe.Pointer(ModuleId)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetModuleId(cnvmlDevice, cModuleId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetC2cModeInfoV function as declared in nvml/nvml.h +func nvmlDeviceGetC2cModeInfoV(nvmlDevice nvmlDevice, C2cModeInfo *C2cModeInfo_v1) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cC2cModeInfo, _ := (*C.nvmlC2cModeInfo_v1_t)(unsafe.Pointer(C2cModeInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetC2cModeInfoV(cnvmlDevice, cC2cModeInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryAffinity function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryAffinity(nvmlDevice nvmlDevice, NodeSetSize uint32, NodeSet *uint, Scope AffinityScope) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cNodeSetSize, _ := (C.uint)(NodeSetSize), cgoAllocsUnknown + cNodeSet, _ := (*C.ulong)(unsafe.Pointer(NodeSet)), cgoAllocsUnknown + cScope, _ := (C.nvmlAffinityScope_t)(Scope), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryAffinity(cnvmlDevice, cNodeSetSize, cNodeSet, cScope) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCpuAffinityWithinScope function as declared in nvml/nvml.h +func nvmlDeviceGetCpuAffinityWithinScope(nvmlDevice nvmlDevice, CpuSetSize uint32, CpuSet *uint, Scope AffinityScope) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCpuSetSize, _ := (C.uint)(CpuSetSize), cgoAllocsUnknown + cCpuSet, _ := (*C.ulong)(unsafe.Pointer(CpuSet)), cgoAllocsUnknown + cScope, _ := (C.nvmlAffinityScope_t)(Scope), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCpuAffinityWithinScope(cnvmlDevice, cCpuSetSize, cCpuSet, cScope) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceGetCpuAffinity(nvmlDevice nvmlDevice, CpuSetSize uint32, CpuSet *uint) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCpuSetSize, _ := (C.uint)(CpuSetSize), cgoAllocsUnknown + cCpuSet, _ := (*C.ulong)(unsafe.Pointer(CpuSet)), cgoAllocsUnknown + __ret 
:= C.nvmlDeviceGetCpuAffinity(cnvmlDevice, cCpuSetSize, cCpuSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceSetCpuAffinity(nvmlDevice nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetCpuAffinity(cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceClearCpuAffinity(nvmlDevice nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceClearCpuAffinity(cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNumaNodeId function as declared in nvml/nvml.h +func nvmlDeviceGetNumaNodeId(nvmlDevice nvmlDevice, Node *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cNode, _ := (*C.uint)(unsafe.Pointer(Node)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNumaNodeId(cnvmlDevice, cNode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTopologyCommonAncestor function as declared in nvml/nvml.h +func nvmlDeviceGetTopologyCommonAncestor(Device1 nvmlDevice, Device2 nvmlDevice, PathInfo *GpuTopologyLevel) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cPathInfo, _ := (*C.nvmlGpuTopologyLevel_t)(unsafe.Pointer(PathInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTopologyCommonAncestor(cDevice1, cDevice2, cPathInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTopologyNearestGpus function as declared in nvml/nvml.h +func nvmlDeviceGetTopologyNearestGpus(nvmlDevice nvmlDevice, Level GpuTopologyLevel, Count *uint32, DeviceArray *nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLevel, _ := (C.nvmlGpuTopologyLevel_t)(Level), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cDeviceArray, _ := (*C.nvmlDevice_t)(unsafe.Pointer(DeviceArray)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTopologyNearestGpus(cnvmlDevice, cLevel, cCount, cDeviceArray) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetP2PStatus function as declared in nvml/nvml.h +func nvmlDeviceGetP2PStatus(Device1 nvmlDevice, Device2 nvmlDevice, P2pIndex GpuP2PCapsIndex, P2pStatus *GpuP2PStatus) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cP2pIndex, _ := (C.nvmlGpuP2PCapsIndex_t)(P2pIndex), cgoAllocsUnknown + cP2pStatus, _ := (*C.nvmlGpuP2PStatus_t)(unsafe.Pointer(P2pStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetP2PStatus(cDevice1, cDevice2, cP2pIndex, cP2pStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetUUID function as declared in nvml/nvml.h +func nvmlDeviceGetUUID(nvmlDevice nvmlDevice, Uuid *byte, Length uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUuid, _ := (*C.char)(unsafe.Pointer(Uuid)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetUUID(cnvmlDevice, cUuid, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMinorNumber function as declared in nvml/nvml.h +func nvmlDeviceGetMinorNumber(nvmlDevice nvmlDevice, MinorNumber *uint32) 
Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinorNumber, _ := (*C.uint)(unsafe.Pointer(MinorNumber)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMinorNumber(cnvmlDevice, cMinorNumber) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBoardPartNumber function as declared in nvml/nvml.h +func nvmlDeviceGetBoardPartNumber(nvmlDevice nvmlDevice, PartNumber *byte, Length uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPartNumber, _ := (*C.char)(unsafe.Pointer(PartNumber)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBoardPartNumber(cnvmlDevice, cPartNumber, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomVersion function as declared in nvml/nvml.h +func nvmlDeviceGetInforomVersion(nvmlDevice nvmlDevice, Object InforomObject, Version *byte, Length uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cObject, _ := (C.nvmlInforomObject_t)(Object), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetInforomVersion(cnvmlDevice, cObject, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomImageVersion function as declared in nvml/nvml.h +func nvmlDeviceGetInforomImageVersion(nvmlDevice nvmlDevice, Version *byte, Length uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetInforomImageVersion(cnvmlDevice, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomConfigurationChecksum function as declared in nvml/nvml.h +func nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice nvmlDevice, Checksum *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cChecksum, _ := (*C.uint)(unsafe.Pointer(Checksum)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetInforomConfigurationChecksum(cnvmlDevice, cChecksum) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceValidateInforom function as declared in nvml/nvml.h +func nvmlDeviceValidateInforom(nvmlDevice nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceValidateInforom(cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetLastBBXFlushTime function as declared in nvml/nvml.h +func nvmlDeviceGetLastBBXFlushTime(nvmlDevice nvmlDevice, Timestamp *uint64, DurationUs *uint) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cTimestamp, _ := (*C.ulonglong)(unsafe.Pointer(Timestamp)), cgoAllocsUnknown + cDurationUs, _ := (*C.ulong)(unsafe.Pointer(DurationUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetLastBBXFlushTime(cnvmlDevice, cTimestamp, cDurationUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDisplayMode function as declared in nvml/nvml.h +func nvmlDeviceGetDisplayMode(nvmlDevice nvmlDevice, Display *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cDisplay, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Display)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDisplayMode(cnvmlDevice, 
cDisplay) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDisplayActive function as declared in nvml/nvml.h +func nvmlDeviceGetDisplayActive(nvmlDevice nvmlDevice, IsActive *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIsActive, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsActive)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDisplayActive(cnvmlDevice, cIsActive) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPersistenceMode function as declared in nvml/nvml.h +func nvmlDeviceGetPersistenceMode(nvmlDevice nvmlDevice, Mode *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPersistenceMode(cnvmlDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfoExt function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfoExt(nvmlDevice nvmlDevice, Pci *PciInfoExt) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfoExt_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfoExt(cnvmlDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v3(nvmlDevice nvmlDevice, Pci *PciInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo_v3(cnvmlDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxPcieLinkGeneration function as declared in nvml/nvml.h +func nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice nvmlDevice, MaxLinkGen *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMaxLinkGen, _ := (*C.uint)(unsafe.Pointer(MaxLinkGen)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxPcieLinkGeneration(cnvmlDevice, cMaxLinkGen) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuMaxPcieLinkGeneration function as declared in nvml/nvml.h +func nvmlDeviceGetGpuMaxPcieLinkGeneration(nvmlDevice nvmlDevice, MaxLinkGenDevice *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMaxLinkGenDevice, _ := (*C.uint)(unsafe.Pointer(MaxLinkGenDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuMaxPcieLinkGeneration(cnvmlDevice, cMaxLinkGenDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxPcieLinkWidth function as declared in nvml/nvml.h +func nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice nvmlDevice, MaxLinkWidth *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMaxLinkWidth, _ := (*C.uint)(unsafe.Pointer(MaxLinkWidth)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxPcieLinkWidth(cnvmlDevice, cMaxLinkWidth) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrPcieLinkGeneration function as declared in nvml/nvml.h +func nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice nvmlDevice, CurrLinkGen *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrLinkGen, _ := (*C.uint)(unsafe.Pointer(CurrLinkGen)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrPcieLinkGeneration(cnvmlDevice, cCurrLinkGen) + __v := (Return)(__ret) + return __v +} + +// 
nvmlDeviceGetCurrPcieLinkWidth function as declared in nvml/nvml.h +func nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice nvmlDevice, CurrLinkWidth *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrLinkWidth, _ := (*C.uint)(unsafe.Pointer(CurrLinkWidth)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrPcieLinkWidth(cnvmlDevice, cCurrLinkWidth) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieThroughput function as declared in nvml/nvml.h +func nvmlDeviceGetPcieThroughput(nvmlDevice nvmlDevice, Counter PcieUtilCounter, Value *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCounter, _ := (C.nvmlPcieUtilCounter_t)(Counter), cgoAllocsUnknown + cValue, _ := (*C.uint)(unsafe.Pointer(Value)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieThroughput(cnvmlDevice, cCounter, cValue) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieReplayCounter function as declared in nvml/nvml.h +func nvmlDeviceGetPcieReplayCounter(nvmlDevice nvmlDevice, Value *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cValue, _ := (*C.uint)(unsafe.Pointer(Value)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieReplayCounter(cnvmlDevice, cValue) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClockInfo function as declared in nvml/nvml.h +func nvmlDeviceGetClockInfo(nvmlDevice nvmlDevice, _type ClockType, Clock *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown + cClock, _ := (*C.uint)(unsafe.Pointer(Clock)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClockInfo(cnvmlDevice, c_type, cClock) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxClockInfo function as declared in nvml/nvml.h +func nvmlDeviceGetMaxClockInfo(nvmlDevice nvmlDevice, _type ClockType, Clock *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown + cClock, _ := (*C.uint)(unsafe.Pointer(Clock)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxClockInfo(cnvmlDevice, c_type, cClock) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpcClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetGpcClkVfOffset(nvmlDevice nvmlDevice, Offset *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cOffset, _ := (*C.int)(unsafe.Pointer(Offset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpcClkVfOffset(cnvmlDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetApplicationsClock function as declared in nvml/nvml.h +func nvmlDeviceGetApplicationsClock(nvmlDevice nvmlDevice, ClockType ClockType, ClockMHz *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetApplicationsClock(cnvmlDevice, cClockType, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDefaultApplicationsClock function as declared in nvml/nvml.h +func nvmlDeviceGetDefaultApplicationsClock(nvmlDevice nvmlDevice, ClockType ClockType, ClockMHz *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + 
cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDefaultApplicationsClock(cnvmlDevice, cClockType, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClock function as declared in nvml/nvml.h +func nvmlDeviceGetClock(nvmlDevice nvmlDevice, ClockType ClockType, ClockId ClockId, ClockMHz *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockId, _ := (C.nvmlClockId_t)(ClockId), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClock(cnvmlDevice, cClockType, cClockId, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxCustomerBoostClock function as declared in nvml/nvml.h +func nvmlDeviceGetMaxCustomerBoostClock(nvmlDevice nvmlDevice, ClockType ClockType, ClockMHz *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxCustomerBoostClock(cnvmlDevice, cClockType, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedMemoryClocks function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedMemoryClocks(nvmlDevice nvmlDevice, Count *uint32, ClocksMHz *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cClocksMHz, _ := (*C.uint)(unsafe.Pointer(ClocksMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedMemoryClocks(cnvmlDevice, cCount, cClocksMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedGraphicsClocks function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice nvmlDevice, MemoryClockMHz uint32, Count *uint32, ClocksMHz *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMemoryClockMHz, _ := (C.uint)(MemoryClockMHz), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cClocksMHz, _ := (*C.uint)(unsafe.Pointer(ClocksMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedGraphicsClocks(cnvmlDevice, cMemoryClockMHz, cCount, cClocksMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice nvmlDevice, IsEnabled *EnableState, DefaultIsEnabled *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIsEnabled, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsEnabled)), cgoAllocsUnknown + cDefaultIsEnabled, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(DefaultIsEnabled)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAutoBoostedClocksEnabled(cnvmlDevice, cIsEnabled, cDefaultIsEnabled) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFanSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetFanSpeed(nvmlDevice nvmlDevice, Speed *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSpeed, _ := (*C.uint)(unsafe.Pointer(Speed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanSpeed(cnvmlDevice, cSpeed) + __v := 
(Return)(__ret) + return __v +} + +// nvmlDeviceGetFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetFanSpeed_v2(nvmlDevice nvmlDevice, Fan uint32, Speed *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cSpeed, _ := (*C.uint)(unsafe.Pointer(Speed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanSpeed_v2(cnvmlDevice, cFan, cSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTargetFanSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetTargetFanSpeed(nvmlDevice nvmlDevice, Fan uint32, TargetSpeed *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cTargetSpeed, _ := (*C.uint)(unsafe.Pointer(TargetSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTargetFanSpeed(cnvmlDevice, cFan, cTargetSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMinMaxFanSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetMinMaxFanSpeed(nvmlDevice nvmlDevice, MinSpeed *uint32, MaxSpeed *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinSpeed, _ := (*C.uint)(unsafe.Pointer(MinSpeed)), cgoAllocsUnknown + cMaxSpeed, _ := (*C.uint)(unsafe.Pointer(MaxSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMinMaxFanSpeed(cnvmlDevice, cMinSpeed, cMaxSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFanControlPolicy_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetFanControlPolicy_v2(nvmlDevice nvmlDevice, Fan uint32, Policy *FanControlPolicy) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cPolicy, _ := (*C.nvmlFanControlPolicy_t)(unsafe.Pointer(Policy)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanControlPolicy_v2(cnvmlDevice, cFan, cPolicy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNumFans function as declared in nvml/nvml.h +func nvmlDeviceGetNumFans(nvmlDevice nvmlDevice, NumFans *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cNumFans, _ := (*C.uint)(unsafe.Pointer(NumFans)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNumFans(cnvmlDevice, cNumFans) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTemperature function as declared in nvml/nvml.h +func nvmlDeviceGetTemperature(nvmlDevice nvmlDevice, SensorType TemperatureSensors, Temp *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSensorType, _ := (C.nvmlTemperatureSensors_t)(SensorType), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTemperature(cnvmlDevice, cSensorType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTemperatureThreshold function as declared in nvml/nvml.h +func nvmlDeviceGetTemperatureThreshold(nvmlDevice nvmlDevice, ThresholdType TemperatureThresholds, Temp *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cThresholdType, _ := (C.nvmlTemperatureThresholds_t)(ThresholdType), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTemperatureThreshold(cnvmlDevice, cThresholdType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetThermalSettings function as declared in 
nvml/nvml.h +func nvmlDeviceGetThermalSettings(nvmlDevice nvmlDevice, SensorIndex uint32, PThermalSettings *GpuThermalSettings) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSensorIndex, _ := (C.uint)(SensorIndex), cgoAllocsUnknown + cPThermalSettings, _ := (*C.nvmlGpuThermalSettings_t)(unsafe.Pointer(PThermalSettings)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetThermalSettings(cnvmlDevice, cSensorIndex, cPThermalSettings) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPerformanceState function as declared in nvml/nvml.h +func nvmlDeviceGetPerformanceState(nvmlDevice nvmlDevice, PState *Pstates) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPState, _ := (*C.nvmlPstates_t)(unsafe.Pointer(PState)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPerformanceState(cnvmlDevice, cPState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrentClocksEventReasons function as declared in nvml/nvml.h +func nvmlDeviceGetCurrentClocksEventReasons(nvmlDevice nvmlDevice, ClocksEventReasons *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cClocksEventReasons, _ := (*C.ulonglong)(unsafe.Pointer(ClocksEventReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrentClocksEventReasons(cnvmlDevice, cClocksEventReasons) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrentClocksThrottleReasons function as declared in nvml/nvml.h +func nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice nvmlDevice, ClocksThrottleReasons *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cClocksThrottleReasons, _ := (*C.ulonglong)(unsafe.Pointer(ClocksThrottleReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrentClocksThrottleReasons(cnvmlDevice, cClocksThrottleReasons) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedClocksEventReasons function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedClocksEventReasons(nvmlDevice nvmlDevice, SupportedClocksEventReasons *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSupportedClocksEventReasons, _ := (*C.ulonglong)(unsafe.Pointer(SupportedClocksEventReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedClocksEventReasons(cnvmlDevice, cSupportedClocksEventReasons) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedClocksThrottleReasons function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice nvmlDevice, SupportedClocksThrottleReasons *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSupportedClocksThrottleReasons, _ := (*C.ulonglong)(unsafe.Pointer(SupportedClocksThrottleReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedClocksThrottleReasons(cnvmlDevice, cSupportedClocksThrottleReasons) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerState function as declared in nvml/nvml.h +func nvmlDeviceGetPowerState(nvmlDevice nvmlDevice, PState *Pstates) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPState, _ := (*C.nvmlPstates_t)(unsafe.Pointer(PState)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerState(cnvmlDevice, cPState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDynamicPstatesInfo function as declared in nvml/nvml.h +func 
nvmlDeviceGetDynamicPstatesInfo(nvmlDevice nvmlDevice, PDynamicPstatesInfo *GpuDynamicPstatesInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPDynamicPstatesInfo, _ := (*C.nvmlGpuDynamicPstatesInfo_t)(unsafe.Pointer(PDynamicPstatesInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDynamicPstatesInfo(cnvmlDevice, cPDynamicPstatesInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetMemClkVfOffset(nvmlDevice nvmlDevice, Offset *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cOffset, _ := (*C.int)(unsafe.Pointer(Offset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemClkVfOffset(cnvmlDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMinMaxClockOfPState function as declared in nvml/nvml.h +func nvmlDeviceGetMinMaxClockOfPState(nvmlDevice nvmlDevice, _type ClockType, Pstate Pstates, MinClockMHz *uint32, MaxClockMHz *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown + cPstate, _ := (C.nvmlPstates_t)(Pstate), cgoAllocsUnknown + cMinClockMHz, _ := (*C.uint)(unsafe.Pointer(MinClockMHz)), cgoAllocsUnknown + cMaxClockMHz, _ := (*C.uint)(unsafe.Pointer(MaxClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMinMaxClockOfPState(cnvmlDevice, c_type, cPstate, cMinClockMHz, cMaxClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedPerformanceStates function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedPerformanceStates(nvmlDevice nvmlDevice, Pstates *Pstates, Size uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPstates, _ := (*C.nvmlPstates_t)(unsafe.Pointer(Pstates)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedPerformanceStates(cnvmlDevice, cPstates, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpcClkMinMaxVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice nvmlDevice, MinOffset *int32, MaxOffset *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinOffset, _ := (*C.int)(unsafe.Pointer(MinOffset)), cgoAllocsUnknown + cMaxOffset, _ := (*C.int)(unsafe.Pointer(MaxOffset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpcClkMinMaxVfOffset(cnvmlDevice, cMinOffset, cMaxOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemClkMinMaxVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice nvmlDevice, MinOffset *int32, MaxOffset *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinOffset, _ := (*C.int)(unsafe.Pointer(MinOffset)), cgoAllocsUnknown + cMaxOffset, _ := (*C.int)(unsafe.Pointer(MaxOffset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemClkMinMaxVfOffset(cnvmlDevice, cMinOffset, cMaxOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementMode function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementMode(nvmlDevice nvmlDevice, Mode *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := 
C.nvmlDeviceGetPowerManagementMode(cnvmlDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementLimit function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementLimit(nvmlDevice nvmlDevice, Limit *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLimit, _ := (*C.uint)(unsafe.Pointer(Limit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementLimit(cnvmlDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementLimitConstraints function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice nvmlDevice, MinLimit *uint32, MaxLimit *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinLimit, _ := (*C.uint)(unsafe.Pointer(MinLimit)), cgoAllocsUnknown + cMaxLimit, _ := (*C.uint)(unsafe.Pointer(MaxLimit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementLimitConstraints(cnvmlDevice, cMinLimit, cMaxLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementDefaultLimit function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice nvmlDevice, DefaultLimit *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cDefaultLimit, _ := (*C.uint)(unsafe.Pointer(DefaultLimit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementDefaultLimit(cnvmlDevice, cDefaultLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerUsage function as declared in nvml/nvml.h +func nvmlDeviceGetPowerUsage(nvmlDevice nvmlDevice, Power *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPower, _ := (*C.uint)(unsafe.Pointer(Power)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerUsage(cnvmlDevice, cPower) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTotalEnergyConsumption function as declared in nvml/nvml.h +func nvmlDeviceGetTotalEnergyConsumption(nvmlDevice nvmlDevice, Energy *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEnergy, _ := (*C.ulonglong)(unsafe.Pointer(Energy)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTotalEnergyConsumption(cnvmlDevice, cEnergy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEnforcedPowerLimit function as declared in nvml/nvml.h +func nvmlDeviceGetEnforcedPowerLimit(nvmlDevice nvmlDevice, Limit *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLimit, _ := (*C.uint)(unsafe.Pointer(Limit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEnforcedPowerLimit(cnvmlDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuOperationMode function as declared in nvml/nvml.h +func nvmlDeviceGetGpuOperationMode(nvmlDevice nvmlDevice, Current *GpuOperationMode, Pending *GpuOperationMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlGpuOperationMode_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlGpuOperationMode_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuOperationMode(cnvmlDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryInfo function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryInfo(nvmlDevice nvmlDevice, Memory *Memory) Return { + cnvmlDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMemory, _ := (*C.nvmlMemory_t)(unsafe.Pointer(Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryInfo(cnvmlDevice, cMemory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryInfo_v2(nvmlDevice nvmlDevice, Memory *Memory_v2) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMemory, _ := (*C.nvmlMemory_v2_t)(unsafe.Pointer(Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryInfo_v2(cnvmlDevice, cMemory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeMode function as declared in nvml/nvml.h +func nvmlDeviceGetComputeMode(nvmlDevice nvmlDevice, Mode *ComputeMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := (*C.nvmlComputeMode_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeMode(cnvmlDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCudaComputeCapability function as declared in nvml/nvml.h +func nvmlDeviceGetCudaComputeCapability(nvmlDevice nvmlDevice, Major *int32, Minor *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMajor, _ := (*C.int)(unsafe.Pointer(Major)), cgoAllocsUnknown + cMinor, _ := (*C.int)(unsafe.Pointer(Minor)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCudaComputeCapability(cnvmlDevice, cMajor, cMinor) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEccMode function as declared in nvml/nvml.h +func nvmlDeviceGetEccMode(nvmlDevice nvmlDevice, Current *EnableState, Pending *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEccMode(cnvmlDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDefaultEccMode function as declared in nvml/nvml.h +func nvmlDeviceGetDefaultEccMode(nvmlDevice nvmlDevice, DefaultMode *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cDefaultMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(DefaultMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDefaultEccMode(cnvmlDevice, cDefaultMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBoardId function as declared in nvml/nvml.h +func nvmlDeviceGetBoardId(nvmlDevice nvmlDevice, BoardId *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cBoardId, _ := (*C.uint)(unsafe.Pointer(BoardId)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBoardId(cnvmlDevice, cBoardId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMultiGpuBoard function as declared in nvml/nvml.h +func nvmlDeviceGetMultiGpuBoard(nvmlDevice nvmlDevice, MultiGpuBool *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMultiGpuBool, _ := (*C.uint)(unsafe.Pointer(MultiGpuBool)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMultiGpuBoard(cnvmlDevice, cMultiGpuBool) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTotalEccErrors function as declared in nvml/nvml.h +func nvmlDeviceGetTotalEccErrors(nvmlDevice nvmlDevice, ErrorType MemoryErrorType, CounterType 
EccCounterType, EccCounts *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cEccCounts, _ := (*C.ulonglong)(unsafe.Pointer(EccCounts)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTotalEccErrors(cnvmlDevice, cErrorType, cCounterType, cEccCounts) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDetailedEccErrors function as declared in nvml/nvml.h +func nvmlDeviceGetDetailedEccErrors(nvmlDevice nvmlDevice, ErrorType MemoryErrorType, CounterType EccCounterType, EccCounts *EccErrorCounts) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cEccCounts, _ := (*C.nvmlEccErrorCounts_t)(unsafe.Pointer(EccCounts)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDetailedEccErrors(cnvmlDevice, cErrorType, cCounterType, cEccCounts) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryErrorCounter function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryErrorCounter(nvmlDevice nvmlDevice, ErrorType MemoryErrorType, CounterType EccCounterType, LocationType MemoryLocation, Count *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cLocationType, _ := (C.nvmlMemoryLocation_t)(LocationType), cgoAllocsUnknown + cCount, _ := (*C.ulonglong)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryErrorCounter(cnvmlDevice, cErrorType, cCounterType, cLocationType, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetUtilizationRates function as declared in nvml/nvml.h +func nvmlDeviceGetUtilizationRates(nvmlDevice nvmlDevice, Utilization *Utilization) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.nvmlUtilization_t)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetUtilizationRates(cnvmlDevice, cUtilization) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderUtilization(nvmlDevice nvmlDevice, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderUtilization(cnvmlDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderCapacity function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderCapacity(nvmlDevice nvmlDevice, EncoderQueryType EncoderType, EncoderCapacity *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEncoderQueryType, _ := (C.nvmlEncoderType_t)(EncoderQueryType), cgoAllocsUnknown + cEncoderCapacity, _ := (*C.uint)(unsafe.Pointer(EncoderCapacity)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderCapacity(cnvmlDevice, cEncoderQueryType, cEncoderCapacity) + __v := (Return)(__ret) + 
return __v +} + +// nvmlDeviceGetEncoderStats function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderStats(nvmlDevice nvmlDevice, SessionCount *uint32, AverageFps *uint32, AverageLatency *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cAverageFps, _ := (*C.uint)(unsafe.Pointer(AverageFps)), cgoAllocsUnknown + cAverageLatency, _ := (*C.uint)(unsafe.Pointer(AverageLatency)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderStats(cnvmlDevice, cSessionCount, cAverageFps, cAverageLatency) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderSessions function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderSessions(nvmlDevice nvmlDevice, SessionCount *uint32, SessionInfos *EncoderSessionInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfos, _ := (*C.nvmlEncoderSessionInfo_t)(unsafe.Pointer(SessionInfos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderSessions(cnvmlDevice, cSessionCount, cSessionInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDecoderUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetDecoderUtilization(nvmlDevice nvmlDevice, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDecoderUtilization(cnvmlDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetJpgUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetJpgUtilization(nvmlDevice nvmlDevice, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetJpgUtilization(cnvmlDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetOfaUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetOfaUtilization(nvmlDevice nvmlDevice, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetOfaUtilization(cnvmlDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFBCStats function as declared in nvml/nvml.h +func nvmlDeviceGetFBCStats(nvmlDevice nvmlDevice, FbcStats *FBCStats) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFbcStats, _ := (*C.nvmlFBCStats_t)(unsafe.Pointer(FbcStats)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFBCStats(cnvmlDevice, cFbcStats) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFBCSessions function as declared in nvml/nvml.h +func nvmlDeviceGetFBCSessions(nvmlDevice nvmlDevice, SessionCount *uint32, SessionInfo *FBCSessionInfo) Return 
{ + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlFBCSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFBCSessions(cnvmlDevice, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDriverModel function as declared in nvml/nvml.h +func nvmlDeviceGetDriverModel(nvmlDevice nvmlDevice, Current *DriverModel, Pending *DriverModel) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDriverModel(cnvmlDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVbiosVersion function as declared in nvml/nvml.h +func nvmlDeviceGetVbiosVersion(nvmlDevice nvmlDevice, Version *byte, Length uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVbiosVersion(cnvmlDevice, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBridgeChipInfo function as declared in nvml/nvml.h +func nvmlDeviceGetBridgeChipInfo(nvmlDevice nvmlDevice, BridgeHierarchy *BridgeChipHierarchy) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cBridgeHierarchy, _ := (*C.nvmlBridgeChipHierarchy_t)(unsafe.Pointer(BridgeHierarchy)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBridgeChipInfo(cnvmlDevice, cBridgeHierarchy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses_v3(cnvmlDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v3(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses_v3(cnvmlDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses_v3(cnvmlDevice, cInfoCount, cInfos) + __v 
:= (Return)(__ret) + return __v +} + +// nvmlDeviceGetRunningProcessDetailList function as declared in nvml/nvml.h +func nvmlDeviceGetRunningProcessDetailList(nvmlDevice nvmlDevice, Plist *ProcessDetailList) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPlist, _ := (*C.nvmlProcessDetailList_t)(unsafe.Pointer(Plist)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRunningProcessDetailList(cnvmlDevice, cPlist) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceOnSameBoard function as declared in nvml/nvml.h +func nvmlDeviceOnSameBoard(Device1 nvmlDevice, Device2 nvmlDevice, OnSameBoard *int32) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cOnSameBoard, _ := (*C.int)(unsafe.Pointer(OnSameBoard)), cgoAllocsUnknown + __ret := C.nvmlDeviceOnSameBoard(cDevice1, cDevice2, cOnSameBoard) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAPIRestriction function as declared in nvml/nvml.h +func nvmlDeviceGetAPIRestriction(nvmlDevice nvmlDevice, ApiType RestrictedAPI, IsRestricted *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cApiType, _ := (C.nvmlRestrictedAPI_t)(ApiType), cgoAllocsUnknown + cIsRestricted, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsRestricted)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAPIRestriction(cnvmlDevice, cApiType, cIsRestricted) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSamples function as declared in nvml/nvml.h +func nvmlDeviceGetSamples(nvmlDevice nvmlDevice, _type SamplingType, LastSeenTimeStamp uint64, SampleValType *ValueType, SampleCount *uint32, Samples *Sample) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + c_type, _ := (C.nvmlSamplingType_t)(_type), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cSampleValType, _ := (*C.nvmlValueType_t)(unsafe.Pointer(SampleValType)), cgoAllocsUnknown + cSampleCount, _ := (*C.uint)(unsafe.Pointer(SampleCount)), cgoAllocsUnknown + cSamples, _ := (*C.nvmlSample_t)(unsafe.Pointer(Samples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSamples(cnvmlDevice, c_type, cLastSeenTimeStamp, cSampleValType, cSampleCount, cSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBAR1MemoryInfo function as declared in nvml/nvml.h +func nvmlDeviceGetBAR1MemoryInfo(nvmlDevice nvmlDevice, Bar1Memory *BAR1Memory) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cBar1Memory, _ := (*C.nvmlBAR1Memory_t)(unsafe.Pointer(Bar1Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBAR1MemoryInfo(cnvmlDevice, cBar1Memory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetViolationStatus function as declared in nvml/nvml.h +func nvmlDeviceGetViolationStatus(nvmlDevice nvmlDevice, PerfPolicyType PerfPolicyType, ViolTime *ViolationTime) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPerfPolicyType, _ := (C.nvmlPerfPolicyType_t)(PerfPolicyType), cgoAllocsUnknown + cViolTime, _ := (*C.nvmlViolationTime_t)(unsafe.Pointer(ViolTime)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetViolationStatus(cnvmlDevice, cPerfPolicyType, cViolTime) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetIrqNum function as declared in nvml/nvml.h +func nvmlDeviceGetIrqNum(nvmlDevice nvmlDevice, 
IrqNum *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIrqNum, _ := (*C.uint)(unsafe.Pointer(IrqNum)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetIrqNum(cnvmlDevice, cIrqNum) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNumGpuCores function as declared in nvml/nvml.h +func nvmlDeviceGetNumGpuCores(nvmlDevice nvmlDevice, NumCores *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cNumCores, _ := (*C.uint)(unsafe.Pointer(NumCores)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNumGpuCores(cnvmlDevice, cNumCores) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerSource function as declared in nvml/nvml.h +func nvmlDeviceGetPowerSource(nvmlDevice nvmlDevice, PowerSource *PowerSource) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPowerSource, _ := (*C.nvmlPowerSource_t)(unsafe.Pointer(PowerSource)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerSource(cnvmlDevice, cPowerSource) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryBusWidth function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryBusWidth(nvmlDevice nvmlDevice, BusWidth *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cBusWidth, _ := (*C.uint)(unsafe.Pointer(BusWidth)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryBusWidth(cnvmlDevice, cBusWidth) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieLinkMaxSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice nvmlDevice, MaxSpeed *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMaxSpeed, _ := (*C.uint)(unsafe.Pointer(MaxSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieLinkMaxSpeed(cnvmlDevice, cMaxSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetPcieSpeed(nvmlDevice nvmlDevice, PcieSpeed *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPcieSpeed, _ := (*C.uint)(unsafe.Pointer(PcieSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieSpeed(cnvmlDevice, cPcieSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAdaptiveClockInfoStatus function as declared in nvml/nvml.h +func nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice nvmlDevice, AdaptiveClockStatus *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cAdaptiveClockStatus, _ := (*C.uint)(unsafe.Pointer(AdaptiveClockStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAdaptiveClockInfoStatus(cnvmlDevice, cAdaptiveClockStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBusType function as declared in nvml/nvml.h +func nvmlDeviceGetBusType(nvmlDevice nvmlDevice, _type *BusType) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + c_type, _ := (*C.nvmlBusType_t)(unsafe.Pointer(_type)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBusType(cnvmlDevice, c_type) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuFabricInfo function as declared in nvml/nvml.h +func nvmlDeviceGetGpuFabricInfo(nvmlDevice nvmlDevice, GpuFabricInfo *GpuFabricInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpuFabricInfo, _ := 
(*C.nvmlGpuFabricInfo_t)(unsafe.Pointer(GpuFabricInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuFabricInfo(cnvmlDevice, cGpuFabricInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuFabricInfoV function as declared in nvml/nvml.h +func nvmlDeviceGetGpuFabricInfoV(nvmlDevice nvmlDevice, GpuFabricInfo *GpuFabricInfoV) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpuFabricInfo, _ := (*C.nvmlGpuFabricInfoV_t)(unsafe.Pointer(GpuFabricInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuFabricInfoV(cnvmlDevice, cGpuFabricInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeCapabilities function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeCapabilities(Capabilities *ConfComputeSystemCaps) Return { + cCapabilities, _ := (*C.nvmlConfComputeSystemCaps_t)(unsafe.Pointer(Capabilities)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeCapabilities(cCapabilities) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeState function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeState(State *ConfComputeSystemState) Return { + cState, _ := (*C.nvmlConfComputeSystemState_t)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeState(cState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetConfComputeMemSizeInfo function as declared in nvml/nvml.h +func nvmlDeviceGetConfComputeMemSizeInfo(nvmlDevice nvmlDevice, MemInfo *ConfComputeMemSizeInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMemInfo, _ := (*C.nvmlConfComputeMemSizeInfo_t)(unsafe.Pointer(MemInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetConfComputeMemSizeInfo(cnvmlDevice, cMemInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeGpusReadyState function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeGpusReadyState(IsAcceptingWork *uint32) Return { + cIsAcceptingWork, _ := (*C.uint)(unsafe.Pointer(IsAcceptingWork)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeGpusReadyState(cIsAcceptingWork) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetConfComputeProtectedMemoryUsage function as declared in nvml/nvml.h +func nvmlDeviceGetConfComputeProtectedMemoryUsage(nvmlDevice nvmlDevice, Memory *Memory) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMemory, _ := (*C.nvmlMemory_t)(unsafe.Pointer(Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetConfComputeProtectedMemoryUsage(cnvmlDevice, cMemory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetConfComputeGpuCertificate function as declared in nvml/nvml.h +func nvmlDeviceGetConfComputeGpuCertificate(nvmlDevice nvmlDevice, GpuCert *ConfComputeGpuCertificate) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpuCert, _ := (*C.nvmlConfComputeGpuCertificate_t)(unsafe.Pointer(GpuCert)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetConfComputeGpuCertificate(cnvmlDevice, cGpuCert) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetConfComputeGpuAttestationReport function as declared in nvml/nvml.h +func nvmlDeviceGetConfComputeGpuAttestationReport(nvmlDevice nvmlDevice, GpuAtstReport *ConfComputeGpuAttestationReport) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpuAtstReport, _ := (*C.nvmlConfComputeGpuAttestationReport_t)(unsafe.Pointer(GpuAtstReport)), 
cgoAllocsUnknown + __ret := C.nvmlDeviceGetConfComputeGpuAttestationReport(cnvmlDevice, cGpuAtstReport) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeKeyRotationThresholdInfo function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeKeyRotationThresholdInfo(PKeyRotationThrInfo *ConfComputeGetKeyRotationThresholdInfo) Return { + cPKeyRotationThrInfo, _ := (*C.nvmlConfComputeGetKeyRotationThresholdInfo_t)(unsafe.Pointer(PKeyRotationThrInfo)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeKeyRotationThresholdInfo(cPKeyRotationThrInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeSettings function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeSettings(Settings *SystemConfComputeSettings) Return { + cSettings, _ := (*C.nvmlSystemConfComputeSettings_t)(unsafe.Pointer(Settings)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeSettings(cSettings) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGspFirmwareVersion function as declared in nvml/nvml.h +func nvmlDeviceGetGspFirmwareVersion(nvmlDevice nvmlDevice, Version *byte) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGspFirmwareVersion(cnvmlDevice, cVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGspFirmwareMode function as declared in nvml/nvml.h +func nvmlDeviceGetGspFirmwareMode(nvmlDevice nvmlDevice, IsEnabled *uint32, DefaultMode *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIsEnabled, _ := (*C.uint)(unsafe.Pointer(IsEnabled)), cgoAllocsUnknown + cDefaultMode, _ := (*C.uint)(unsafe.Pointer(DefaultMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGspFirmwareMode(cnvmlDevice, cIsEnabled, cDefaultMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingMode function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingMode(nvmlDevice nvmlDevice, Mode *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingMode(cnvmlDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingStats function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingStats(nvmlDevice nvmlDevice, Pid uint32, Stats *AccountingStats) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cStats, _ := (*C.nvmlAccountingStats_t)(unsafe.Pointer(Stats)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingStats(cnvmlDevice, cPid, cStats) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingPids function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingPids(nvmlDevice nvmlDevice, Count *uint32, Pids *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cPids, _ := (*C.uint)(unsafe.Pointer(Pids)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingPids(cnvmlDevice, cCount, cPids) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingBufferSize function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingBufferSize(nvmlDevice nvmlDevice, BufferSize *uint32) Return { + cnvmlDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingBufferSize(cnvmlDevice, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPages function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPages(nvmlDevice nvmlDevice, Cause PageRetirementCause, PageCount *uint32, Addresses *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCause, _ := (C.nvmlPageRetirementCause_t)(Cause), cgoAllocsUnknown + cPageCount, _ := (*C.uint)(unsafe.Pointer(PageCount)), cgoAllocsUnknown + cAddresses, _ := (*C.ulonglong)(unsafe.Pointer(Addresses)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPages(cnvmlDevice, cCause, cPageCount, cAddresses) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPages_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPages_v2(nvmlDevice nvmlDevice, Cause PageRetirementCause, PageCount *uint32, Addresses *uint64, Timestamps *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCause, _ := (C.nvmlPageRetirementCause_t)(Cause), cgoAllocsUnknown + cPageCount, _ := (*C.uint)(unsafe.Pointer(PageCount)), cgoAllocsUnknown + cAddresses, _ := (*C.ulonglong)(unsafe.Pointer(Addresses)), cgoAllocsUnknown + cTimestamps, _ := (*C.ulonglong)(unsafe.Pointer(Timestamps)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPages_v2(cnvmlDevice, cCause, cPageCount, cAddresses, cTimestamps) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPagesPendingStatus function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice nvmlDevice, IsPending *EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIsPending, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsPending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPagesPendingStatus(cnvmlDevice, cIsPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRemappedRows function as declared in nvml/nvml.h +func nvmlDeviceGetRemappedRows(nvmlDevice nvmlDevice, CorrRows *uint32, UncRows *uint32, IsPending *uint32, FailureOccurred *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCorrRows, _ := (*C.uint)(unsafe.Pointer(CorrRows)), cgoAllocsUnknown + cUncRows, _ := (*C.uint)(unsafe.Pointer(UncRows)), cgoAllocsUnknown + cIsPending, _ := (*C.uint)(unsafe.Pointer(IsPending)), cgoAllocsUnknown + cFailureOccurred, _ := (*C.uint)(unsafe.Pointer(FailureOccurred)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRemappedRows(cnvmlDevice, cCorrRows, cUncRows, cIsPending, cFailureOccurred) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRowRemapperHistogram function as declared in nvml/nvml.h +func nvmlDeviceGetRowRemapperHistogram(nvmlDevice nvmlDevice, Values *RowRemapperHistogramValues) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cValues, _ := (*C.nvmlRowRemapperHistogramValues_t)(unsafe.Pointer(Values)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRowRemapperHistogram(cnvmlDevice, cValues) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetArchitecture function as declared in nvml/nvml.h +func nvmlDeviceGetArchitecture(nvmlDevice nvmlDevice, Arch *DeviceArchitecture) Return { + cnvmlDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cArch, _ := (*C.nvmlDeviceArchitecture_t)(unsafe.Pointer(Arch)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetArchitecture(cnvmlDevice, cArch) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClkMonStatus function as declared in nvml/nvml.h +func nvmlDeviceGetClkMonStatus(nvmlDevice nvmlDevice, Status *ClkMonStatus) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cStatus, _ := (*C.nvmlClkMonStatus_t)(unsafe.Pointer(Status)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClkMonStatus(cnvmlDevice, cStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetProcessUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetProcessUtilization(nvmlDevice nvmlDevice, Utilization *ProcessUtilizationSample, ProcessSamplesCount *uint32, LastSeenTimeStamp uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.nvmlProcessUtilizationSample_t)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cProcessSamplesCount, _ := (*C.uint)(unsafe.Pointer(ProcessSamplesCount)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + __ret := C.nvmlDeviceGetProcessUtilization(cnvmlDevice, cUtilization, cProcessSamplesCount, cLastSeenTimeStamp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetProcessesUtilizationInfo function as declared in nvml/nvml.h +func nvmlDeviceGetProcessesUtilizationInfo(nvmlDevice nvmlDevice, ProcesesUtilInfo *ProcessesUtilizationInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProcesesUtilInfo, _ := (*C.nvmlProcessesUtilizationInfo_t)(unsafe.Pointer(ProcesesUtilInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetProcessesUtilizationInfo(cnvmlDevice, cProcesesUtilInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitSetLedState function as declared in nvml/nvml.h +func nvmlUnitSetLedState(nvmlUnit nvmlUnit, Color LedColor) Return { + cnvmlUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&nvmlUnit)), cgoAllocsUnknown + cColor, _ := (C.nvmlLedColor_t)(Color), cgoAllocsUnknown + __ret := C.nvmlUnitSetLedState(cnvmlUnit, cColor) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetPersistenceMode function as declared in nvml/nvml.h +func nvmlDeviceSetPersistenceMode(nvmlDevice nvmlDevice, Mode EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := (C.nvmlEnableState_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetPersistenceMode(cnvmlDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetComputeMode function as declared in nvml/nvml.h +func nvmlDeviceSetComputeMode(nvmlDevice nvmlDevice, Mode ComputeMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := (C.nvmlComputeMode_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetComputeMode(cnvmlDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetEccMode function as declared in nvml/nvml.h +func nvmlDeviceSetEccMode(nvmlDevice nvmlDevice, Ecc EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEcc, _ := (C.nvmlEnableState_t)(Ecc), cgoAllocsUnknown + __ret := C.nvmlDeviceSetEccMode(cnvmlDevice, cEcc) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearEccErrorCounts function as declared in 
nvml/nvml.h +func nvmlDeviceClearEccErrorCounts(nvmlDevice nvmlDevice, CounterType EccCounterType) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + __ret := C.nvmlDeviceClearEccErrorCounts(cnvmlDevice, cCounterType) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDriverModel function as declared in nvml/nvml.h +func nvmlDeviceSetDriverModel(nvmlDevice nvmlDevice, DriverModel DriverModel, Flags uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cDriverModel, _ := (C.nvmlDriverModel_t)(DriverModel), cgoAllocsUnknown + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDriverModel(cnvmlDevice, cDriverModel, cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpuLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceSetGpuLockedClocks(nvmlDevice nvmlDevice, MinGpuClockMHz uint32, MaxGpuClockMHz uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinGpuClockMHz, _ := (C.uint)(MinGpuClockMHz), cgoAllocsUnknown + cMaxGpuClockMHz, _ := (C.uint)(MaxGpuClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetGpuLockedClocks(cnvmlDevice, cMinGpuClockMHz, cMaxGpuClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetGpuLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceResetGpuLockedClocks(nvmlDevice nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceResetGpuLockedClocks(cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMemoryLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceSetMemoryLockedClocks(nvmlDevice nvmlDevice, MinMemClockMHz uint32, MaxMemClockMHz uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinMemClockMHz, _ := (C.uint)(MinMemClockMHz), cgoAllocsUnknown + cMaxMemClockMHz, _ := (C.uint)(MaxMemClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMemoryLockedClocks(cnvmlDevice, cMinMemClockMHz, cMaxMemClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetMemoryLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceResetMemoryLockedClocks(nvmlDevice nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceResetMemoryLockedClocks(cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetApplicationsClocks function as declared in nvml/nvml.h +func nvmlDeviceSetApplicationsClocks(nvmlDevice nvmlDevice, MemClockMHz uint32, GraphicsClockMHz uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMemClockMHz, _ := (C.uint)(MemClockMHz), cgoAllocsUnknown + cGraphicsClockMHz, _ := (C.uint)(GraphicsClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetApplicationsClocks(cnvmlDevice, cMemClockMHz, cGraphicsClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetApplicationsClocks function as declared in nvml/nvml.h +func nvmlDeviceResetApplicationsClocks(nvmlDevice nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceResetApplicationsClocks(cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAutoBoostedClocksEnabled function as declared in 
nvml/nvml.h +func nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice nvmlDevice, Enabled EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAutoBoostedClocksEnabled(cnvmlDevice, cEnabled) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDefaultAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice nvmlDevice, Enabled EnableState, Flags uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDefaultAutoBoostedClocksEnabled(cnvmlDevice, cEnabled, cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDefaultFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice nvmlDevice, Fan uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDefaultFanSpeed_v2(cnvmlDevice, cFan) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetFanControlPolicy function as declared in nvml/nvml.h +func nvmlDeviceSetFanControlPolicy(nvmlDevice nvmlDevice, Fan uint32, Policy FanControlPolicy) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cPolicy, _ := (C.nvmlFanControlPolicy_t)(Policy), cgoAllocsUnknown + __ret := C.nvmlDeviceSetFanControlPolicy(cnvmlDevice, cFan, cPolicy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetTemperatureThreshold function as declared in nvml/nvml.h +func nvmlDeviceSetTemperatureThreshold(nvmlDevice nvmlDevice, ThresholdType TemperatureThresholds, Temp *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cThresholdType, _ := (C.nvmlTemperatureThresholds_t)(ThresholdType), cgoAllocsUnknown + cTemp, _ := (*C.int)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetTemperatureThreshold(cnvmlDevice, cThresholdType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetPowerManagementLimit function as declared in nvml/nvml.h +func nvmlDeviceSetPowerManagementLimit(nvmlDevice nvmlDevice, Limit uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLimit, _ := (C.uint)(Limit), cgoAllocsUnknown + __ret := C.nvmlDeviceSetPowerManagementLimit(cnvmlDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpuOperationMode function as declared in nvml/nvml.h +func nvmlDeviceSetGpuOperationMode(nvmlDevice nvmlDevice, Mode GpuOperationMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := (C.nvmlGpuOperationMode_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetGpuOperationMode(cnvmlDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAPIRestriction function as declared in nvml/nvml.h +func nvmlDeviceSetAPIRestriction(nvmlDevice nvmlDevice, ApiType RestrictedAPI, IsRestricted EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cApiType, _ := (C.nvmlRestrictedAPI_t)(ApiType), cgoAllocsUnknown + cIsRestricted, _ := 
(C.nvmlEnableState_t)(IsRestricted), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAPIRestriction(cnvmlDevice, cApiType, cIsRestricted) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceSetFanSpeed_v2(nvmlDevice nvmlDevice, Fan uint32, Speed uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cSpeed, _ := (C.uint)(Speed), cgoAllocsUnknown + __ret := C.nvmlDeviceSetFanSpeed_v2(cnvmlDevice, cFan, cSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpcClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceSetGpcClkVfOffset(nvmlDevice nvmlDevice, Offset int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cOffset, _ := (C.int)(Offset), cgoAllocsUnknown + __ret := C.nvmlDeviceSetGpcClkVfOffset(cnvmlDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMemClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceSetMemClkVfOffset(nvmlDevice nvmlDevice, Offset int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cOffset, _ := (C.int)(Offset), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMemClkVfOffset(cnvmlDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetConfComputeUnprotectedMemSize function as declared in nvml/nvml.h +func nvmlDeviceSetConfComputeUnprotectedMemSize(nvmlDevice nvmlDevice, SizeKiB uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSizeKiB, _ := (C.ulonglong)(SizeKiB), cgoAllocsUnknown + __ret := C.nvmlDeviceSetConfComputeUnprotectedMemSize(cnvmlDevice, cSizeKiB) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemSetConfComputeGpusReadyState function as declared in nvml/nvml.h +func nvmlSystemSetConfComputeGpusReadyState(IsAcceptingWork uint32) Return { + cIsAcceptingWork, _ := (C.uint)(IsAcceptingWork), cgoAllocsUnknown + __ret := C.nvmlSystemSetConfComputeGpusReadyState(cIsAcceptingWork) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemSetConfComputeKeyRotationThresholdInfo function as declared in nvml/nvml.h +func nvmlSystemSetConfComputeKeyRotationThresholdInfo(PKeyRotationThrInfo *ConfComputeSetKeyRotationThresholdInfo) Return { + cPKeyRotationThrInfo, _ := (*C.nvmlConfComputeSetKeyRotationThresholdInfo_t)(unsafe.Pointer(PKeyRotationThrInfo)), cgoAllocsUnknown + __ret := C.nvmlSystemSetConfComputeKeyRotationThresholdInfo(cPKeyRotationThrInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAccountingMode function as declared in nvml/nvml.h +func nvmlDeviceSetAccountingMode(nvmlDevice nvmlDevice, Mode EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := (C.nvmlEnableState_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAccountingMode(cnvmlDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearAccountingPids function as declared in nvml/nvml.h +func nvmlDeviceClearAccountingPids(nvmlDevice nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceClearAccountingPids(cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkState function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkState(nvmlDevice nvmlDevice, Link uint32, IsActive *EnableState) Return { + 
cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cIsActive, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsActive)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkState(cnvmlDevice, cLink, cIsActive) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkVersion function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkVersion(nvmlDevice nvmlDevice, Link uint32, Version *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cVersion, _ := (*C.uint)(unsafe.Pointer(Version)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkVersion(cnvmlDevice, cLink, cVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkCapability function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkCapability(nvmlDevice nvmlDevice, Link uint32, Capability NvLinkCapability, CapResult *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCapability, _ := (C.nvmlNvLinkCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkCapability(cnvmlDevice, cLink, cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemotePciInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemotePciInfo_v2(nvmlDevice nvmlDevice, Link uint32, Pci *PciInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemotePciInfo_v2(cnvmlDevice, cLink, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkErrorCounter function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkErrorCounter(nvmlDevice nvmlDevice, Link uint32, Counter NvLinkErrorCounter, CounterValue *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.nvmlNvLinkErrorCounter_t)(Counter), cgoAllocsUnknown + cCounterValue, _ := (*C.ulonglong)(unsafe.Pointer(CounterValue)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkErrorCounter(cnvmlDevice, cLink, cCounter, cCounterValue) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetNvLinkErrorCounters function as declared in nvml/nvml.h +func nvmlDeviceResetNvLinkErrorCounters(nvmlDevice nvmlDevice, Link uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + __ret := C.nvmlDeviceResetNvLinkErrorCounters(cnvmlDevice, cLink) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetNvLinkUtilizationControl function as declared in nvml/nvml.h +func nvmlDeviceSetNvLinkUtilizationControl(nvmlDevice nvmlDevice, Link uint32, Counter uint32, Control *NvLinkUtilizationControl, Reset uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cControl, _ := (*C.nvmlNvLinkUtilizationControl_t)(unsafe.Pointer(Control)), cgoAllocsUnknown + cReset, _ := (C.uint)(Reset), cgoAllocsUnknown + __ret := 
C.nvmlDeviceSetNvLinkUtilizationControl(cnvmlDevice, cLink, cCounter, cControl, cReset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkUtilizationControl function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkUtilizationControl(nvmlDevice nvmlDevice, Link uint32, Counter uint32, Control *NvLinkUtilizationControl) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cControl, _ := (*C.nvmlNvLinkUtilizationControl_t)(unsafe.Pointer(Control)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkUtilizationControl(cnvmlDevice, cLink, cCounter, cControl) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkUtilizationCounter(nvmlDevice nvmlDevice, Link uint32, Counter uint32, Rxcounter *uint64, Txcounter *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cRxcounter, _ := (*C.ulonglong)(unsafe.Pointer(Rxcounter)), cgoAllocsUnknown + cTxcounter, _ := (*C.ulonglong)(unsafe.Pointer(Txcounter)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkUtilizationCounter(cnvmlDevice, cLink, cCounter, cRxcounter, cTxcounter) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceFreezeNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceFreezeNvLinkUtilizationCounter(nvmlDevice nvmlDevice, Link uint32, Counter uint32, Freeze EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cFreeze, _ := (C.nvmlEnableState_t)(Freeze), cgoAllocsUnknown + __ret := C.nvmlDeviceFreezeNvLinkUtilizationCounter(cnvmlDevice, cLink, cCounter, cFreeze) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceResetNvLinkUtilizationCounter(nvmlDevice nvmlDevice, Link uint32, Counter uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + __ret := C.nvmlDeviceResetNvLinkUtilizationCounter(cnvmlDevice, cLink, cCounter) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemoteDeviceType function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice nvmlDevice, Link uint32, PNvLinkDeviceType *IntNvLinkDeviceType) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPNvLinkDeviceType, _ := (*C.nvmlIntNvLinkDeviceType_t)(unsafe.Pointer(PNvLinkDeviceType)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemoteDeviceType(cnvmlDevice, cLink, cPNvLinkDeviceType) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetCreate function as declared in nvml/nvml.h +func nvmlEventSetCreate(Set *nvmlEventSet) Return { + cSet, _ := (*C.nvmlEventSet_t)(unsafe.Pointer(Set)), cgoAllocsUnknown + __ret := C.nvmlEventSetCreate(cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRegisterEvents function as declared in nvml/nvml.h +func nvmlDeviceRegisterEvents(nvmlDevice nvmlDevice, EventTypes uint64, 
Set nvmlEventSet) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEventTypes, _ := (C.ulonglong)(EventTypes), cgoAllocsUnknown + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + __ret := C.nvmlDeviceRegisterEvents(cnvmlDevice, cEventTypes, cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedEventTypes function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedEventTypes(nvmlDevice nvmlDevice, EventTypes *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEventTypes, _ := (*C.ulonglong)(unsafe.Pointer(EventTypes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedEventTypes(cnvmlDevice, cEventTypes) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetWait_v2 function as declared in nvml/nvml.h +func nvmlEventSetWait_v2(Set nvmlEventSet, Data *nvmlEventData, Timeoutms uint32) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + cData, _ := (*C.nvmlEventData_t)(unsafe.Pointer(Data)), cgoAllocsUnknown + cTimeoutms, _ := (C.uint)(Timeoutms), cgoAllocsUnknown + __ret := C.nvmlEventSetWait_v2(cSet, cData, cTimeoutms) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetFree function as declared in nvml/nvml.h +func nvmlEventSetFree(Set nvmlEventSet) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + __ret := C.nvmlEventSetFree(cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceModifyDrainState function as declared in nvml/nvml.h +func nvmlDeviceModifyDrainState(PciInfo *PciInfo, NewState EnableState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cNewState, _ := (C.nvmlEnableState_t)(NewState), cgoAllocsUnknown + __ret := C.nvmlDeviceModifyDrainState(cPciInfo, cNewState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceQueryDrainState function as declared in nvml/nvml.h +func nvmlDeviceQueryDrainState(PciInfo *PciInfo, CurrentState *EnableState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cCurrentState, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(CurrentState)), cgoAllocsUnknown + __ret := C.nvmlDeviceQueryDrainState(cPciInfo, cCurrentState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRemoveGpu_v2 function as declared in nvml/nvml.h +func nvmlDeviceRemoveGpu_v2(PciInfo *PciInfo, GpuState DetachGpuState, LinkState PcieLinkState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cGpuState, _ := (C.nvmlDetachGpuState_t)(GpuState), cgoAllocsUnknown + cLinkState, _ := (C.nvmlPcieLinkState_t)(LinkState), cgoAllocsUnknown + __ret := C.nvmlDeviceRemoveGpu_v2(cPciInfo, cGpuState, cLinkState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceDiscoverGpus function as declared in nvml/nvml.h +func nvmlDeviceDiscoverGpus(PciInfo *PciInfo) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceDiscoverGpus(cPciInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFieldValues function as declared in nvml/nvml.h +func nvmlDeviceGetFieldValues(nvmlDevice nvmlDevice, ValuesCount int32, Values *FieldValue) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cValuesCount, _ := (C.int)(ValuesCount), cgoAllocsUnknown + cValues, _ := (*C.nvmlFieldValue_t)(unsafe.Pointer(Values)), cgoAllocsUnknown + __ret := 
C.nvmlDeviceGetFieldValues(cnvmlDevice, cValuesCount, cValues) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearFieldValues function as declared in nvml/nvml.h +func nvmlDeviceClearFieldValues(nvmlDevice nvmlDevice, ValuesCount int32, Values *FieldValue) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cValuesCount, _ := (C.int)(ValuesCount), cgoAllocsUnknown + cValues, _ := (*C.nvmlFieldValue_t)(unsafe.Pointer(Values)), cgoAllocsUnknown + __ret := C.nvmlDeviceClearFieldValues(cnvmlDevice, cValuesCount, cValues) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVirtualizationMode function as declared in nvml/nvml.h +func nvmlDeviceGetVirtualizationMode(nvmlDevice nvmlDevice, PVirtualMode *GpuVirtualizationMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPVirtualMode, _ := (*C.nvmlGpuVirtualizationMode_t)(unsafe.Pointer(PVirtualMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVirtualizationMode(cnvmlDevice, cPVirtualMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHostVgpuMode function as declared in nvml/nvml.h +func nvmlDeviceGetHostVgpuMode(nvmlDevice nvmlDevice, PHostVgpuMode *HostVgpuMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPHostVgpuMode, _ := (*C.nvmlHostVgpuMode_t)(unsafe.Pointer(PHostVgpuMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHostVgpuMode(cnvmlDevice, cPHostVgpuMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetVirtualizationMode function as declared in nvml/nvml.h +func nvmlDeviceSetVirtualizationMode(nvmlDevice nvmlDevice, VirtualMode GpuVirtualizationMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVirtualMode, _ := (C.nvmlGpuVirtualizationMode_t)(VirtualMode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVirtualizationMode(cnvmlDevice, cVirtualMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuHeterogeneousMode function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuHeterogeneousMode(nvmlDevice nvmlDevice, PHeterogeneousMode *VgpuHeterogeneousMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPHeterogeneousMode, _ := (*C.nvmlVgpuHeterogeneousMode_t)(unsafe.Pointer(PHeterogeneousMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuHeterogeneousMode(cnvmlDevice, cPHeterogeneousMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetVgpuHeterogeneousMode function as declared in nvml/nvml.h +func nvmlDeviceSetVgpuHeterogeneousMode(nvmlDevice nvmlDevice, PHeterogeneousMode *VgpuHeterogeneousMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPHeterogeneousMode, _ := (*C.nvmlVgpuHeterogeneousMode_t)(unsafe.Pointer(PHeterogeneousMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVgpuHeterogeneousMode(cnvmlDevice, cPHeterogeneousMode) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetPlacementId function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetPlacementId(nvmlVgpuInstance nvmlVgpuInstance, PPlacement *VgpuPlacementId) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cPPlacement, _ := (*C.nvmlVgpuPlacementId_t)(unsafe.Pointer(PPlacement)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetPlacementId(cnvmlVgpuInstance, cPPlacement) + __v := (Return)(__ret) + return __v +} + +// 
nvmlDeviceGetVgpuTypeSupportedPlacements function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDevice nvmlDevice, nvmlVgpuTypeId nvmlVgpuTypeId, PPlacementList *VgpuPlacementList) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cPPlacementList, _ := (*C.nvmlVgpuPlacementList_t)(unsafe.Pointer(PPlacementList)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuTypeSupportedPlacements(cnvmlDevice, cnvmlVgpuTypeId, cPPlacementList) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuTypeCreatablePlacements function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDevice nvmlDevice, nvmlVgpuTypeId nvmlVgpuTypeId, PPlacementList *VgpuPlacementList) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cPPlacementList, _ := (*C.nvmlVgpuPlacementList_t)(unsafe.Pointer(PPlacementList)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuTypeCreatablePlacements(cnvmlDevice, cnvmlVgpuTypeId, cPPlacementList) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetGspHeapSize function as declared in nvml/nvml.h +func nvmlVgpuTypeGetGspHeapSize(nvmlVgpuTypeId nvmlVgpuTypeId, GspHeapSize *uint64) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cGspHeapSize, _ := (*C.ulonglong)(unsafe.Pointer(GspHeapSize)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetGspHeapSize(cnvmlVgpuTypeId, cGspHeapSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetFbReservation function as declared in nvml/nvml.h +func nvmlVgpuTypeGetFbReservation(nvmlVgpuTypeId nvmlVgpuTypeId, FbReservation *uint64) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cFbReservation, _ := (*C.ulonglong)(unsafe.Pointer(FbReservation)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetFbReservation(cnvmlVgpuTypeId, cFbReservation) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetVgpuCapabilities function as declared in nvml/nvml.h +func nvmlDeviceSetVgpuCapabilities(nvmlDevice nvmlDevice, Capability DeviceVgpuCapability, State EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCapability, _ := (C.nvmlDeviceVgpuCapability_t)(Capability), cgoAllocsUnknown + cState, _ := (C.nvmlEnableState_t)(State), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVgpuCapabilities(cnvmlDevice, cCapability, cState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v4 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice nvmlDevice, PGridLicensableFeatures *GridLicensableFeatures) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v4(cnvmlDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlGetVgpuDriverCapabilities function as declared in nvml/nvml.h +func nvmlGetVgpuDriverCapabilities(Capability VgpuDriverCapability, CapResult *uint32) Return { + cCapability, _ := (C.nvmlVgpuDriverCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), 
cgoAllocsUnknown + __ret := C.nvmlGetVgpuDriverCapabilities(cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuCapabilities function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuCapabilities(nvmlDevice nvmlDevice, Capability DeviceVgpuCapability, CapResult *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCapability, _ := (C.nvmlDeviceVgpuCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuCapabilities(cnvmlDevice, cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedVgpus(nvmlDevice nvmlDevice, VgpuCount *uint32, VgpuTypeIds *nvmlVgpuTypeId) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuTypeIds, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(VgpuTypeIds)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedVgpus(cnvmlDevice, cVgpuCount, cVgpuTypeIds) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCreatableVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetCreatableVgpus(nvmlDevice nvmlDevice, VgpuCount *uint32, VgpuTypeIds *nvmlVgpuTypeId) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuTypeIds, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(VgpuTypeIds)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCreatableVgpus(cnvmlDevice, cVgpuCount, cVgpuTypeIds) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetClass function as declared in nvml/nvml.h +func nvmlVgpuTypeGetClass(nvmlVgpuTypeId nvmlVgpuTypeId, VgpuTypeClass *byte, Size *uint32) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cVgpuTypeClass, _ := (*C.char)(unsafe.Pointer(VgpuTypeClass)), cgoAllocsUnknown + cSize, _ := (*C.uint)(unsafe.Pointer(Size)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetClass(cnvmlVgpuTypeId, cVgpuTypeClass, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetName function as declared in nvml/nvml.h +func nvmlVgpuTypeGetName(nvmlVgpuTypeId nvmlVgpuTypeId, VgpuTypeName *byte, Size *uint32) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cVgpuTypeName, _ := (*C.char)(unsafe.Pointer(VgpuTypeName)), cgoAllocsUnknown + cSize, _ := (*C.uint)(unsafe.Pointer(Size)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetName(cnvmlVgpuTypeId, cVgpuTypeName, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetGpuInstanceProfileId function as declared in nvml/nvml.h +func nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId nvmlVgpuTypeId, GpuInstanceProfileId *uint32) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cGpuInstanceProfileId, _ := (*C.uint)(unsafe.Pointer(GpuInstanceProfileId)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetGpuInstanceProfileId(cnvmlVgpuTypeId, cGpuInstanceProfileId) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetDeviceID function as declared in nvml/nvml.h +func nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId nvmlVgpuTypeId, DeviceID *uint64, SubsystemID *uint64) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + 
cDeviceID, _ := (*C.ulonglong)(unsafe.Pointer(DeviceID)), cgoAllocsUnknown + cSubsystemID, _ := (*C.ulonglong)(unsafe.Pointer(SubsystemID)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetDeviceID(cnvmlVgpuTypeId, cDeviceID, cSubsystemID) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetFramebufferSize function as declared in nvml/nvml.h +func nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId nvmlVgpuTypeId, FbSize *uint64) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cFbSize, _ := (*C.ulonglong)(unsafe.Pointer(FbSize)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetFramebufferSize(cnvmlVgpuTypeId, cFbSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetNumDisplayHeads function as declared in nvml/nvml.h +func nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId nvmlVgpuTypeId, NumDisplayHeads *uint32) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cNumDisplayHeads, _ := (*C.uint)(unsafe.Pointer(NumDisplayHeads)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetNumDisplayHeads(cnvmlVgpuTypeId, cNumDisplayHeads) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetResolution function as declared in nvml/nvml.h +func nvmlVgpuTypeGetResolution(nvmlVgpuTypeId nvmlVgpuTypeId, DisplayIndex uint32, Xdim *uint32, Ydim *uint32) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cDisplayIndex, _ := (C.uint)(DisplayIndex), cgoAllocsUnknown + cXdim, _ := (*C.uint)(unsafe.Pointer(Xdim)), cgoAllocsUnknown + cYdim, _ := (*C.uint)(unsafe.Pointer(Ydim)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetResolution(cnvmlVgpuTypeId, cDisplayIndex, cXdim, cYdim) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetLicense function as declared in nvml/nvml.h +func nvmlVgpuTypeGetLicense(nvmlVgpuTypeId nvmlVgpuTypeId, VgpuTypeLicenseString *byte, Size uint32) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cVgpuTypeLicenseString, _ := (*C.char)(unsafe.Pointer(VgpuTypeLicenseString)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetLicense(cnvmlVgpuTypeId, cVgpuTypeLicenseString, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetFrameRateLimit function as declared in nvml/nvml.h +func nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId nvmlVgpuTypeId, FrameRateLimit *uint32) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cFrameRateLimit, _ := (*C.uint)(unsafe.Pointer(FrameRateLimit)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetFrameRateLimit(cnvmlVgpuTypeId, cFrameRateLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetMaxInstances function as declared in nvml/nvml.h +func nvmlVgpuTypeGetMaxInstances(nvmlDevice nvmlDevice, nvmlVgpuTypeId nvmlVgpuTypeId, VgpuInstanceCount *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cVgpuInstanceCount, _ := (*C.uint)(unsafe.Pointer(VgpuInstanceCount)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetMaxInstances(cnvmlDevice, cnvmlVgpuTypeId, cVgpuInstanceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetMaxInstancesPerVm function as declared in nvml/nvml.h +func nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId nvmlVgpuTypeId, VgpuInstanceCountPerVm *uint32) Return { + cnvmlVgpuTypeId, _ := 
(C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cVgpuInstanceCountPerVm, _ := (*C.uint)(unsafe.Pointer(VgpuInstanceCountPerVm)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetMaxInstancesPerVm(cnvmlVgpuTypeId, cVgpuInstanceCountPerVm) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetActiveVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetActiveVgpus(nvmlDevice nvmlDevice, VgpuCount *uint32, VgpuInstances *nvmlVgpuInstance) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuInstances, _ := (*C.nvmlVgpuInstance_t)(unsafe.Pointer(VgpuInstances)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetActiveVgpus(cnvmlDevice, cVgpuCount, cVgpuInstances) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetVmID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetVmID(nvmlVgpuInstance nvmlVgpuInstance, VmId *byte, Size uint32, VmIdType *VgpuVmIdType) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cVmId, _ := (*C.char)(unsafe.Pointer(VmId)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + cVmIdType, _ := (*C.nvmlVgpuVmIdType_t)(unsafe.Pointer(VmIdType)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetVmID(cnvmlVgpuInstance, cVmId, cSize, cVmIdType) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetUUID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetUUID(nvmlVgpuInstance nvmlVgpuInstance, Uuid *byte, Size uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cUuid, _ := (*C.char)(unsafe.Pointer(Uuid)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetUUID(cnvmlVgpuInstance, cUuid, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetVmDriverVersion function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance nvmlVgpuInstance, Version *byte, Length uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetVmDriverVersion(cnvmlVgpuInstance, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFbUsage function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance nvmlVgpuInstance, FbUsage *uint64) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cFbUsage, _ := (*C.ulonglong)(unsafe.Pointer(FbUsage)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFbUsage(cnvmlVgpuInstance, cFbUsage) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseStatus function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance nvmlVgpuInstance, Licensed *uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cLicensed, _ := (*C.uint)(unsafe.Pointer(Licensed)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseStatus(cnvmlVgpuInstance, cLicensed) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetType function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetType(nvmlVgpuInstance nvmlVgpuInstance, nvmlVgpuTypeId *nvmlVgpuTypeId) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), 
cgoAllocsUnknown + cnvmlVgpuTypeId, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(nvmlVgpuTypeId)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetType(cnvmlVgpuInstance, cnvmlVgpuTypeId) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFrameRateLimit function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance nvmlVgpuInstance, FrameRateLimit *uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cFrameRateLimit, _ := (*C.uint)(unsafe.Pointer(FrameRateLimit)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFrameRateLimit(cnvmlVgpuInstance, cFrameRateLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEccMode function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEccMode(nvmlVgpuInstance nvmlVgpuInstance, EccMode *EnableState) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cEccMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(EccMode)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEccMode(cnvmlVgpuInstance, cEccMode) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderCapacity function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance nvmlVgpuInstance, EncoderCapacity *uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cEncoderCapacity, _ := (*C.uint)(unsafe.Pointer(EncoderCapacity)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderCapacity(cnvmlVgpuInstance, cEncoderCapacity) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceSetEncoderCapacity function as declared in nvml/nvml.h +func nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance nvmlVgpuInstance, EncoderCapacity uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cEncoderCapacity, _ := (C.uint)(EncoderCapacity), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceSetEncoderCapacity(cnvmlVgpuInstance, cEncoderCapacity) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance nvmlVgpuInstance, SessionCount *uint32, AverageFps *uint32, AverageLatency *uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cAverageFps, _ := (*C.uint)(unsafe.Pointer(AverageFps)), cgoAllocsUnknown + cAverageLatency, _ := (*C.uint)(unsafe.Pointer(AverageLatency)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderStats(cnvmlVgpuInstance, cSessionCount, cAverageFps, cAverageLatency) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderSessions function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance nvmlVgpuInstance, SessionCount *uint32, SessionInfo *EncoderSessionInfo) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlEncoderSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderSessions(cnvmlVgpuInstance, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFBCStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance nvmlVgpuInstance, 
FbcStats *FBCStats) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cFbcStats, _ := (*C.nvmlFBCStats_t)(unsafe.Pointer(FbcStats)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFBCStats(cnvmlVgpuInstance, cFbcStats) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFBCSessions function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance nvmlVgpuInstance, SessionCount *uint32, SessionInfo *FBCSessionInfo) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlFBCSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFBCSessions(cnvmlVgpuInstance, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetGpuInstanceId function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetGpuInstanceId(nvmlVgpuInstance nvmlVgpuInstance, GpuInstanceId *uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cGpuInstanceId, _ := (*C.uint)(unsafe.Pointer(GpuInstanceId)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetGpuInstanceId(cnvmlVgpuInstance, cGpuInstanceId) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetGpuPciId function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance nvmlVgpuInstance, VgpuPciId *byte, Length *uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cVgpuPciId, _ := (*C.char)(unsafe.Pointer(VgpuPciId)), cgoAllocsUnknown + cLength, _ := (*C.uint)(unsafe.Pointer(Length)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetGpuPciId(cnvmlVgpuInstance, cVgpuPciId, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetCapabilities function as declared in nvml/nvml.h +func nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId nvmlVgpuTypeId, Capability VgpuCapability, CapResult *uint32) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cCapability, _ := (C.nvmlVgpuCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetCapabilities(cnvmlVgpuTypeId, cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetMdevUUID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance nvmlVgpuInstance, MdevUuid *byte, Size uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cMdevUuid, _ := (*C.char)(unsafe.Pointer(MdevUuid)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetMdevUUID(cnvmlVgpuInstance, cMdevUuid, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetMetadata function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance nvmlVgpuInstance, nvmlVgpuMetadata *nvmlVgpuMetadata, BufferSize *uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cnvmlVgpuMetadata, _ := (*C.nvmlVgpuMetadata_t)(unsafe.Pointer(nvmlVgpuMetadata)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetMetadata(cnvmlVgpuInstance, cnvmlVgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// 
nvmlDeviceGetVgpuMetadata function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuMetadata(nvmlDevice nvmlDevice, PgpuMetadata *nvmlVgpuPgpuMetadata, BufferSize *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.nvmlVgpuPgpuMetadata_t)(unsafe.Pointer(PgpuMetadata)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuMetadata(cnvmlDevice, cPgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlGetVgpuCompatibility function as declared in nvml/nvml.h +func nvmlGetVgpuCompatibility(nvmlVgpuMetadata *nvmlVgpuMetadata, PgpuMetadata *nvmlVgpuPgpuMetadata, CompatibilityInfo *VgpuPgpuCompatibility) Return { + cnvmlVgpuMetadata, _ := (*C.nvmlVgpuMetadata_t)(unsafe.Pointer(nvmlVgpuMetadata)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.nvmlVgpuPgpuMetadata_t)(unsafe.Pointer(PgpuMetadata)), cgoAllocsUnknown + cCompatibilityInfo, _ := (*C.nvmlVgpuPgpuCompatibility_t)(unsafe.Pointer(CompatibilityInfo)), cgoAllocsUnknown + __ret := C.nvmlGetVgpuCompatibility(cnvmlVgpuMetadata, cPgpuMetadata, cCompatibilityInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPgpuMetadataString function as declared in nvml/nvml.h +func nvmlDeviceGetPgpuMetadataString(nvmlDevice nvmlDevice, PgpuMetadata *byte, BufferSize *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.char)(unsafe.Pointer(PgpuMetadata)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPgpuMetadataString(cnvmlDevice, cPgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuSchedulerLog function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuSchedulerLog(nvmlDevice nvmlDevice, PSchedulerLog *VgpuSchedulerLog) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPSchedulerLog, _ := (*C.nvmlVgpuSchedulerLog_t)(unsafe.Pointer(PSchedulerLog)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuSchedulerLog(cnvmlDevice, cPSchedulerLog) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuSchedulerState function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuSchedulerState(nvmlDevice nvmlDevice, PSchedulerState *VgpuSchedulerGetState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPSchedulerState, _ := (*C.nvmlVgpuSchedulerGetState_t)(unsafe.Pointer(PSchedulerState)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuSchedulerState(cnvmlDevice, cPSchedulerState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuSchedulerCapabilities function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice nvmlDevice, PCapabilities *VgpuSchedulerCapabilities) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPCapabilities, _ := (*C.nvmlVgpuSchedulerCapabilities_t)(unsafe.Pointer(PCapabilities)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuSchedulerCapabilities(cnvmlDevice, cPCapabilities) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetVgpuSchedulerState function as declared in nvml/nvml.h +func nvmlDeviceSetVgpuSchedulerState(nvmlDevice nvmlDevice, PSchedulerState *VgpuSchedulerSetState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + 
cPSchedulerState, _ := (*C.nvmlVgpuSchedulerSetState_t)(unsafe.Pointer(PSchedulerState)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVgpuSchedulerState(cnvmlDevice, cPSchedulerState) + __v := (Return)(__ret) + return __v +} + +// nvmlGetVgpuVersion function as declared in nvml/nvml.h +func nvmlGetVgpuVersion(Supported *VgpuVersion, Current *VgpuVersion) Return { + cSupported, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(Supported)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + __ret := C.nvmlGetVgpuVersion(cSupported, cCurrent) + __v := (Return)(__ret) + return __v +} + +// nvmlSetVgpuVersion function as declared in nvml/nvml.h +func nvmlSetVgpuVersion(VgpuVersion *VgpuVersion) Return { + cVgpuVersion, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(VgpuVersion)), cgoAllocsUnknown + __ret := C.nvmlSetVgpuVersion(cVgpuVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuUtilization(nvmlDevice nvmlDevice, LastSeenTimeStamp uint64, SampleValType *ValueType, VgpuInstanceSamplesCount *uint32, UtilizationSamples *VgpuInstanceUtilizationSample) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cSampleValType, _ := (*C.nvmlValueType_t)(unsafe.Pointer(SampleValType)), cgoAllocsUnknown + cVgpuInstanceSamplesCount, _ := (*C.uint)(unsafe.Pointer(VgpuInstanceSamplesCount)), cgoAllocsUnknown + cUtilizationSamples, _ := (*C.nvmlVgpuInstanceUtilizationSample_t)(unsafe.Pointer(UtilizationSamples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuUtilization(cnvmlDevice, cLastSeenTimeStamp, cSampleValType, cVgpuInstanceSamplesCount, cUtilizationSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuInstancesUtilizationInfo function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuInstancesUtilizationInfo(nvmlDevice nvmlDevice, VgpuUtilInfo *VgpuInstancesUtilizationInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVgpuUtilInfo, _ := (*C.nvmlVgpuInstancesUtilizationInfo_t)(unsafe.Pointer(VgpuUtilInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuInstancesUtilizationInfo(cnvmlDevice, cVgpuUtilInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuProcessUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuProcessUtilization(nvmlDevice nvmlDevice, LastSeenTimeStamp uint64, VgpuProcessSamplesCount *uint32, UtilizationSamples *VgpuProcessUtilizationSample) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cVgpuProcessSamplesCount, _ := (*C.uint)(unsafe.Pointer(VgpuProcessSamplesCount)), cgoAllocsUnknown + cUtilizationSamples, _ := (*C.nvmlVgpuProcessUtilizationSample_t)(unsafe.Pointer(UtilizationSamples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuProcessUtilization(cnvmlDevice, cLastSeenTimeStamp, cVgpuProcessSamplesCount, cUtilizationSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuProcessesUtilizationInfo function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuProcessesUtilizationInfo(nvmlDevice nvmlDevice, VgpuProcUtilInfo *VgpuProcessesUtilizationInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVgpuProcUtilInfo, _ := 
(*C.nvmlVgpuProcessesUtilizationInfo_t)(unsafe.Pointer(VgpuProcUtilInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuProcessesUtilizationInfo(cnvmlDevice, cVgpuProcUtilInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingMode function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingMode(nvmlVgpuInstance nvmlVgpuInstance, Mode *EnableState) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingMode(cnvmlVgpuInstance, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingPids function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingPids(nvmlVgpuInstance nvmlVgpuInstance, Count *uint32, Pids *uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cPids, _ := (*C.uint)(unsafe.Pointer(Pids)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingPids(cnvmlVgpuInstance, cCount, cPids) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingStats(nvmlVgpuInstance nvmlVgpuInstance, Pid uint32, Stats *AccountingStats) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cStats, _ := (*C.nvmlAccountingStats_t)(unsafe.Pointer(Stats)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingStats(cnvmlVgpuInstance, cPid, cStats) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceClearAccountingPids function as declared in nvml/nvml.h +func nvmlVgpuInstanceClearAccountingPids(nvmlVgpuInstance nvmlVgpuInstance) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceClearAccountingPids(cnvmlVgpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseInfo_v2 function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseInfo_v2(nvmlVgpuInstance nvmlVgpuInstance, LicenseInfo *VgpuLicenseInfo) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cLicenseInfo, _ := (*C.nvmlVgpuLicenseInfo_t)(unsafe.Pointer(LicenseInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseInfo_v2(cnvmlVgpuInstance, cLicenseInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGetExcludedDeviceCount function as declared in nvml/nvml.h +func nvmlGetExcludedDeviceCount(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + __ret := C.nvmlGetExcludedDeviceCount(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGetExcludedDeviceInfoByIndex function as declared in nvml/nvml.h +func nvmlGetExcludedDeviceInfoByIndex(Index uint32, Info *ExcludedDeviceInfo) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cInfo, _ := (*C.nvmlExcludedDeviceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGetExcludedDeviceInfoByIndex(cIndex, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMigMode function as declared in nvml/nvml.h +func nvmlDeviceSetMigMode(nvmlDevice nvmlDevice, Mode uint32, ActivationStatus *Return) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMode, _ := 
(C.uint)(Mode), cgoAllocsUnknown + cActivationStatus, _ := (*C.nvmlReturn_t)(unsafe.Pointer(ActivationStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMigMode(cnvmlDevice, cMode, cActivationStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMigMode function as declared in nvml/nvml.h +func nvmlDeviceGetMigMode(nvmlDevice nvmlDevice, CurrentMode *uint32, PendingMode *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrentMode, _ := (*C.uint)(unsafe.Pointer(CurrentMode)), cgoAllocsUnknown + cPendingMode, _ := (*C.uint)(unsafe.Pointer(PendingMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMigMode(cnvmlDevice, cCurrentMode, cPendingMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceProfileInfo function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceProfileInfo(nvmlDevice nvmlDevice, Profile uint32, Info *GpuInstanceProfileInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlGpuInstanceProfileInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceProfileInfo(cnvmlDevice, cProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceProfileInfoV function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceProfileInfoV(nvmlDevice nvmlDevice, Profile uint32, Info *GpuInstanceProfileInfo_v2) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlGpuInstanceProfileInfo_v2_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceProfileInfoV(cnvmlDevice, cProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstancePossiblePlacements_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstancePossiblePlacements_v2(nvmlDevice nvmlDevice, ProfileId uint32, Placements *GpuInstancePlacement, Count *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacements, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placements)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstancePossiblePlacements_v2(cnvmlDevice, cProfileId, cPlacements, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceRemainingCapacity function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceRemainingCapacity(nvmlDevice nvmlDevice, ProfileId uint32, Count *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceRemainingCapacity(cnvmlDevice, cProfileId, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceCreateGpuInstance function as declared in nvml/nvml.h +func nvmlDeviceCreateGpuInstance(nvmlDevice nvmlDevice, ProfileId uint32, nvmlGpuInstance *nvmlGpuInstance) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cnvmlGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(nvmlGpuInstance)), cgoAllocsUnknown + __ret := 
C.nvmlDeviceCreateGpuInstance(cnvmlDevice, cProfileId, cnvmlGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceCreateGpuInstanceWithPlacement function as declared in nvml/nvml.h +func nvmlDeviceCreateGpuInstanceWithPlacement(nvmlDevice nvmlDevice, ProfileId uint32, Placement *GpuInstancePlacement, nvmlGpuInstance *nvmlGpuInstance) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacement, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placement)), cgoAllocsUnknown + cnvmlGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(nvmlGpuInstance)), cgoAllocsUnknown + __ret := C.nvmlDeviceCreateGpuInstanceWithPlacement(cnvmlDevice, cProfileId, cPlacement, cnvmlGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceDestroy function as declared in nvml/nvml.h +func nvmlGpuInstanceDestroy(nvmlGpuInstance nvmlGpuInstance) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceDestroy(cnvmlGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstances function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstances(nvmlDevice nvmlDevice, ProfileId uint32, GpuInstances *nvmlGpuInstance, Count *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cGpuInstances, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstances)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstances(cnvmlDevice, cProfileId, cGpuInstances, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceById function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceById(nvmlDevice nvmlDevice, Id uint32, nvmlGpuInstance *nvmlGpuInstance) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cId, _ := (C.uint)(Id), cgoAllocsUnknown + cnvmlGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(nvmlGpuInstance)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceById(cnvmlDevice, cId, cnvmlGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetInfo function as declared in nvml/nvml.h +func nvmlGpuInstanceGetInfo(nvmlGpuInstance nvmlGpuInstance, Info *nvmlGpuInstanceInfo) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlGpuInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetInfo(cnvmlGpuInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceProfileInfo function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceProfileInfo(nvmlGpuInstance nvmlGpuInstance, Profile uint32, EngProfile uint32, Info *ComputeInstanceProfileInfo) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cEngProfile, _ := (C.uint)(EngProfile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceProfileInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceProfileInfo(cnvmlGpuInstance, cProfile, cEngProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceProfileInfoV function as 
declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceProfileInfoV(nvmlGpuInstance nvmlGpuInstance, Profile uint32, EngProfile uint32, Info *ComputeInstanceProfileInfo_v2) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cEngProfile, _ := (C.uint)(EngProfile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceProfileInfo_v2_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceProfileInfoV(cnvmlGpuInstance, cProfile, cEngProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceRemainingCapacity function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceRemainingCapacity(nvmlGpuInstance nvmlGpuInstance, ProfileId uint32, Count *uint32) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceRemainingCapacity(cnvmlGpuInstance, cProfileId, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstancePossiblePlacements function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstancePossiblePlacements(nvmlGpuInstance nvmlGpuInstance, ProfileId uint32, Placements *ComputeInstancePlacement, Count *uint32) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacements, _ := (*C.nvmlComputeInstancePlacement_t)(unsafe.Pointer(Placements)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstancePossiblePlacements(cnvmlGpuInstance, cProfileId, cPlacements, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceCreateComputeInstance function as declared in nvml/nvml.h +func nvmlGpuInstanceCreateComputeInstance(nvmlGpuInstance nvmlGpuInstance, ProfileId uint32, nvmlComputeInstance *nvmlComputeInstance) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cnvmlComputeInstance, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(nvmlComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceCreateComputeInstance(cnvmlGpuInstance, cProfileId, cnvmlComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceCreateComputeInstanceWithPlacement function as declared in nvml/nvml.h +func nvmlGpuInstanceCreateComputeInstanceWithPlacement(nvmlGpuInstance nvmlGpuInstance, ProfileId uint32, Placement *ComputeInstancePlacement, nvmlComputeInstance *nvmlComputeInstance) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacement, _ := (*C.nvmlComputeInstancePlacement_t)(unsafe.Pointer(Placement)), cgoAllocsUnknown + cnvmlComputeInstance, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(nvmlComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceCreateComputeInstanceWithPlacement(cnvmlGpuInstance, cProfileId, cPlacement, cnvmlComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceDestroy function as declared in nvml/nvml.h +func 
nvmlComputeInstanceDestroy(nvmlComputeInstance nvmlComputeInstance) Return { + cnvmlComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&nvmlComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceDestroy(cnvmlComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstances function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstances(nvmlGpuInstance nvmlGpuInstance, ProfileId uint32, ComputeInstances *nvmlComputeInstance, Count *uint32) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cComputeInstances, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(ComputeInstances)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstances(cnvmlGpuInstance, cProfileId, cComputeInstances, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceById function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance nvmlGpuInstance, Id uint32, nvmlComputeInstance *nvmlComputeInstance) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cId, _ := (C.uint)(Id), cgoAllocsUnknown + cnvmlComputeInstance, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(nvmlComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceById(cnvmlGpuInstance, cId, cnvmlComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceGetInfo_v2 function as declared in nvml/nvml.h +func nvmlComputeInstanceGetInfo_v2(nvmlComputeInstance nvmlComputeInstance, Info *nvmlComputeInstanceInfo) Return { + cnvmlComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&nvmlComputeInstance)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceGetInfo_v2(cnvmlComputeInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceIsMigDeviceHandle function as declared in nvml/nvml.h +func nvmlDeviceIsMigDeviceHandle(nvmlDevice nvmlDevice, IsMigDevice *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIsMigDevice, _ := (*C.uint)(unsafe.Pointer(IsMigDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceIsMigDeviceHandle(cnvmlDevice, cIsMigDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceId function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceId(nvmlDevice nvmlDevice, Id *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cId, _ := (*C.uint)(unsafe.Pointer(Id)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceId(cnvmlDevice, cId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeInstanceId function as declared in nvml/nvml.h +func nvmlDeviceGetComputeInstanceId(nvmlDevice nvmlDevice, Id *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cId, _ := (*C.uint)(unsafe.Pointer(Id)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeInstanceId(cnvmlDevice, cId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxMigDeviceCount function as declared in nvml/nvml.h +func nvmlDeviceGetMaxMigDeviceCount(nvmlDevice nvmlDevice, Count *uint32) Return { + cnvmlDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxMigDeviceCount(cnvmlDevice, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMigDeviceHandleByIndex function as declared in nvml/nvml.h +func nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice nvmlDevice, Index uint32, MigDevice *nvmlDevice) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cMigDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(MigDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMigDeviceHandleByIndex(cnvmlDevice, cIndex, cMigDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDeviceHandleFromMigDeviceHandle function as declared in nvml/nvml.h +func nvmlDeviceGetDeviceHandleFromMigDeviceHandle(MigDevice nvmlDevice, nvmlDevice *nvmlDevice) Return { + cMigDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&MigDevice)), cgoAllocsUnknown + cnvmlDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDeviceHandleFromMigDeviceHandle(cMigDevice, cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmMetricsGet function as declared in nvml/nvml.h +func nvmlGpmMetricsGet(MetricsGet *nvmlGpmMetricsGetType) Return { + cMetricsGet, _ := (*C.nvmlGpmMetricsGet_t)(unsafe.Pointer(MetricsGet)), cgoAllocsUnknown + __ret := C.nvmlGpmMetricsGet(cMetricsGet) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmSampleFree function as declared in nvml/nvml.h +func nvmlGpmSampleFree(nvmlGpmSample nvmlGpmSample) Return { + cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleFree(cnvmlGpmSample) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmSampleAlloc function as declared in nvml/nvml.h +func nvmlGpmSampleAlloc(nvmlGpmSample *nvmlGpmSample) Return { + cnvmlGpmSample, _ := (*C.nvmlGpmSample_t)(unsafe.Pointer(nvmlGpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleAlloc(cnvmlGpmSample) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmSampleGet function as declared in nvml/nvml.h +func nvmlGpmSampleGet(nvmlDevice nvmlDevice, nvmlGpmSample nvmlGpmSample) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleGet(cnvmlDevice, cnvmlGpmSample) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmMigSampleGet function as declared in nvml/nvml.h +func nvmlGpmMigSampleGet(nvmlDevice nvmlDevice, GpuInstanceId uint32, nvmlGpmSample nvmlGpmSample) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpuInstanceId, _ := (C.uint)(GpuInstanceId), cgoAllocsUnknown + cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmMigSampleGet(cnvmlDevice, cGpuInstanceId, cnvmlGpmSample) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmQueryDeviceSupport function as declared in nvml/nvml.h +func nvmlGpmQueryDeviceSupport(nvmlDevice nvmlDevice, GpmSupport *GpmSupport) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpmSupport, _ := (*C.nvmlGpmSupport_t)(unsafe.Pointer(GpmSupport)), cgoAllocsUnknown + __ret := C.nvmlGpmQueryDeviceSupport(cnvmlDevice, cGpmSupport) + __v := (Return)(__ret) + 
return __v +} + +// nvmlGpmQueryIfStreamingEnabled function as declared in nvml/nvml.h +func nvmlGpmQueryIfStreamingEnabled(nvmlDevice nvmlDevice, State *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cState, _ := (*C.uint)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlGpmQueryIfStreamingEnabled(cnvmlDevice, cState) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmSetStreamingEnabled function as declared in nvml/nvml.h +func nvmlGpmSetStreamingEnabled(nvmlDevice nvmlDevice, State uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cState, _ := (C.uint)(State), cgoAllocsUnknown + __ret := C.nvmlGpmSetStreamingEnabled(cnvmlDevice, cState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetNvLinkDeviceLowPowerThreshold function as declared in nvml/nvml.h +func nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice nvmlDevice, Info *NvLinkPowerThres) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlNvLinkPowerThres_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetNvLinkDeviceLowPowerThreshold(cnvmlDevice, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemSetNvlinkBwMode function as declared in nvml/nvml.h +func nvmlSystemSetNvlinkBwMode(NvlinkBwMode uint32) Return { + cNvlinkBwMode, _ := (C.uint)(NvlinkBwMode), cgoAllocsUnknown + __ret := C.nvmlSystemSetNvlinkBwMode(cNvlinkBwMode) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetNvlinkBwMode function as declared in nvml/nvml.h +func nvmlSystemGetNvlinkBwMode(NvlinkBwMode *uint32) Return { + cNvlinkBwMode, _ := (*C.uint)(unsafe.Pointer(NvlinkBwMode)), cgoAllocsUnknown + __ret := C.nvmlSystemGetNvlinkBwMode(cNvlinkBwMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetPowerManagementLimit_v2 function as declared in nvml/nvml.h +func nvmlDeviceSetPowerManagementLimit_v2(nvmlDevice nvmlDevice, PowerValue *PowerValue_v2) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPowerValue, _ := (*C.nvmlPowerValue_v2_t)(unsafe.Pointer(PowerValue)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetPowerManagementLimit_v2(cnvmlDevice, cPowerValue) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSramEccErrorStatus function as declared in nvml/nvml.h +func nvmlDeviceGetSramEccErrorStatus(nvmlDevice nvmlDevice, Status *EccSramErrorStatus) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cStatus, _ := (*C.nvmlEccSramErrorStatus_t)(unsafe.Pointer(Status)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSramEccErrorStatus(cnvmlDevice, cStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlInit_v1 function as declared in nvml/nvml.h +func nvmlInit_v1() Return { + __ret := C.nvmlInit() + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCount_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetCount_v1(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCount(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByIndex_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByIndex_v1(Index uint32, nvmlDevice *nvmlDevice) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cnvmlDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(nvmlDevice)), cgoAllocsUnknown + __ret := 
C.nvmlDeviceGetHandleByIndex(cIndex, cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByPciBusId_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByPciBusId_v1(PciBusId string, nvmlDevice *nvmlDevice) Return { + cPciBusId, _ := unpackPCharString(PciBusId) + cnvmlDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByPciBusId(cPciBusId, cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v1(nvmlDevice nvmlDevice, Pci *PciInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo(cnvmlDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v2(nvmlDevice nvmlDevice, Pci *PciInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo_v2(cnvmlDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemotePciInfo_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemotePciInfo_v1(nvmlDevice nvmlDevice, Link uint32, Pci *PciInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemotePciInfo(cnvmlDevice, cLink, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v1(nvmlDevice nvmlDevice, PGridLicensableFeatures *GridLicensableFeatures) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures(cnvmlDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v2(nvmlDevice nvmlDevice, PGridLicensableFeatures *GridLicensableFeatures) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v2(cnvmlDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v3(nvmlDevice nvmlDevice, PGridLicensableFeatures *GridLicensableFeatures) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v3(cnvmlDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRemoveGpu_v1 function as declared in nvml/nvml.h +func nvmlDeviceRemoveGpu_v1(PciInfo *PciInfo) Return { + 
cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceRemoveGpu(cPciInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetWait_v1 function as declared in nvml/nvml.h +func nvmlEventSetWait_v1(Set nvmlEventSet, Data *nvmlEventData, Timeoutms uint32) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + cData, _ := (*C.nvmlEventData_t)(unsafe.Pointer(Data)), cgoAllocsUnknown + cTimeoutms, _ := (C.uint)(Timeoutms), cgoAllocsUnknown + __ret := C.nvmlEventSetWait(cSet, cData, cTimeoutms) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAttributes_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetAttributes_v1(nvmlDevice nvmlDevice, Attributes *DeviceAttributes) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cAttributes, _ := (*C.nvmlDeviceAttributes_t)(unsafe.Pointer(Attributes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAttributes(cnvmlDevice, cAttributes) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceGetInfo_v1 function as declared in nvml/nvml.h +func nvmlComputeInstanceGetInfo_v1(nvmlComputeInstance nvmlComputeInstance, Info *nvmlComputeInstanceInfo) Return { + cnvmlComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&nvmlComputeInstance)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceGetInfo(cnvmlComputeInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v1(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses(cnvmlDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v2(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses_v2(cnvmlDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v1(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses(cnvmlDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v2(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), 
cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses_v2(cnvmlDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v1(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses(cnvmlDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v2(nvmlDevice nvmlDevice, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses_v2(cnvmlDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstancePossiblePlacements_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstancePossiblePlacements_v1(nvmlDevice nvmlDevice, ProfileId uint32, Placements *GpuInstancePlacement, Count *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacements, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placements)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstancePossiblePlacements(cnvmlDevice, cProfileId, cPlacements, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseInfo_v1 function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseInfo_v1(nvmlVgpuInstance nvmlVgpuInstance, LicenseInfo *VgpuLicenseInfo) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cLicenseInfo, _ := (*C.nvmlVgpuLicenseInfo_t)(unsafe.Pointer(LicenseInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseInfo(cnvmlVgpuInstance, cLicenseInfo) + __v := (Return)(__ret) + return __v +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h new file mode 100644 index 00000000000..1e4eb12dc1b --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h @@ -0,0 +1,11101 @@ +/*** NVML VERSION: 12.4.127 ***/ +/*** From https://api.anaconda.org/download/nvidia/cuda-nvml-dev/12.4.127/linux-64/cuda-nvml-dev-12.4.127-0.tar.bz2 ***/ +/* + * Copyright 1993-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. Users and possessors of this source code + * are hereby granted a nonexclusive, royalty-free license to use this code + * in individual and commercial software. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. 
IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. + */ + +/* +NVML API Reference + +The NVIDIA Management Library (NVML) is a C-based programmatic interface for monitoring and +managing various states within NVIDIA Tesla &tm; GPUs. It is intended to be a platform for building +3rd party applications, and is also the underlying library for the NVIDIA-supported nvidia-smi +tool. NVML is thread-safe so it is safe to make simultaneous NVML calls from multiple threads. + +API Documentation + +Supported platforms: +- Windows: Windows Server 2008 R2 64bit, Windows Server 2012 R2 64bit, Windows 7 64bit, Windows 8 64bit, Windows 10 64bit +- Linux: 32-bit and 64-bit +- Hypervisors: Windows Server 2008R2/2012 Hyper-V 64bit, Citrix XenServer 6.2 SP1+, VMware ESX 5.1/5.5 + +Supported products: +- Full Support + - All Tesla products, starting with the Fermi architecture + - All Quadro products, starting with the Fermi architecture + - All vGPU Software products, starting with the Kepler architecture + - Selected GeForce Titan products +- Limited Support + - All GeForce products, starting with the Fermi architecture + +The NVML library can be found at \%ProgramW6432\%\\"NVIDIA Corporation"\\NVSMI\\ on Windows. It is +not added to the system path by default. To dynamically link to NVML, add this path to the PATH +environment variable. To dynamically load NVML, call LoadLibrary with this path. + +On Linux the NVML library will be found on the standard library path. For 64 bit Linux, both the 32 bit +and 64 bit NVML libraries will be installed.
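For the loading model described above, the Go wrapper that vendors this header (github.com/NVIDIA/go-nvml) resolves libnvidia-ml.so.1 at runtime with dlopen rather than linking against it, so a caller such as ecs-init's GPU manager only needs to initialize the library before issuing queries. A minimal usage sketch against the wrapper's public API (illustrative only; everything outside this header is an assumption based on the vendored package, not part of it):

// Minimal sketch: initialize NVML through the vendored Go bindings and count GPUs.
// Assumes the public API of github.com/NVIDIA/go-nvml/pkg/nvml as vendored here.
package main

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	// Init dlopens libnvidia-ml.so.1 and resolves its symbols at runtime.
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		fmt.Printf("failed to initialize NVML: %v\n", nvml.ErrorString(ret))
		return
	}
	defer nvml.Shutdown()

	count, ret := nvml.DeviceGetCount()
	if ret != nvml.SUCCESS {
		fmt.Printf("failed to count devices: %v\n", nvml.ErrorString(ret))
		return
	}
	fmt.Printf("found %d GPU(s)\n", count)
}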
+ +Online documentation for this library is available at http://docs.nvidia.com/deploy/nvml-api/index.html +*/ + +#ifndef __nvml_nvml_h__ +#define __nvml_nvml_h__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * On Windows, set up methods for DLL export + * define NVML_STATIC_IMPORT when using nvml_loader library + */ +#if defined _WINDOWS + #if !defined NVML_STATIC_IMPORT + #if defined NVML_LIB_EXPORT + #define DECLDIR __declspec(dllexport) + #else + #define DECLDIR __declspec(dllimport) + #endif + #else + #define DECLDIR + #endif +#else + #define DECLDIR +#endif + +/** + * NVML API versioning support + */ +#define NVML_API_VERSION 12 +#define NVML_API_VERSION_STR "12" +/** + * Defining NVML_NO_UNVERSIONED_FUNC_DEFS will disable "auto upgrading" of APIs. + * e.g. the user will have to call nvmlInit_v2 instead of nvmlInit. Enable this + * guard if you need to support older versions of the API + */ +#ifndef NVML_NO_UNVERSIONED_FUNC_DEFS + #define nvmlInit nvmlInit_v2 + #define nvmlDeviceGetPciInfo nvmlDeviceGetPciInfo_v3 + #define nvmlDeviceGetCount nvmlDeviceGetCount_v2 + #define nvmlDeviceGetHandleByIndex nvmlDeviceGetHandleByIndex_v2 + #define nvmlDeviceGetHandleByPciBusId nvmlDeviceGetHandleByPciBusId_v2 + #define nvmlDeviceGetNvLinkRemotePciInfo nvmlDeviceGetNvLinkRemotePciInfo_v2 + #define nvmlDeviceRemoveGpu nvmlDeviceRemoveGpu_v2 + #define nvmlDeviceGetGridLicensableFeatures nvmlDeviceGetGridLicensableFeatures_v4 + #define nvmlEventSetWait nvmlEventSetWait_v2 + #define nvmlDeviceGetAttributes nvmlDeviceGetAttributes_v2 + #define nvmlComputeInstanceGetInfo nvmlComputeInstanceGetInfo_v2 + #define nvmlDeviceGetComputeRunningProcesses nvmlDeviceGetComputeRunningProcesses_v3 + #define nvmlDeviceGetGraphicsRunningProcesses nvmlDeviceGetGraphicsRunningProcesses_v3 + #define nvmlDeviceGetMPSComputeRunningProcesses nvmlDeviceGetMPSComputeRunningProcesses_v3 + #define nvmlBlacklistDeviceInfo_t nvmlExcludedDeviceInfo_t + #define nvmlGetBlacklistDeviceCount nvmlGetExcludedDeviceCount + #define nvmlGetBlacklistDeviceInfoByIndex nvmlGetExcludedDeviceInfoByIndex + #define nvmlDeviceGetGpuInstancePossiblePlacements nvmlDeviceGetGpuInstancePossiblePlacements_v2 + #define nvmlVgpuInstanceGetLicenseInfo nvmlVgpuInstanceGetLicenseInfo_v2 +#endif // #ifndef NVML_NO_UNVERSIONED_FUNC_DEFS + +#define NVML_STRUCT_VERSION(data, ver) (unsigned int)(sizeof(nvml ## data ## _v ## ver ## _t) | \ + (ver << 24U)) + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceStructs Device Structs + * @{ + */ +/***************************************************************************************************/ + +/** + * Special constant that some fields take when they are not available. + * Used when only part of the struct is not available. + * + * Each structure explicitly states when to check for this value. + */ +#define NVML_VALUE_NOT_AVAILABLE (-1) + +typedef struct +{ + struct nvmlDevice_st* handle; +} nvmlDevice_t; + +/** + * Buffer size guaranteed to be large enough for pci bus id + */ +#define NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE 32 + +/** + * Buffer size guaranteed to be large enough for pci bus id for ::busIdLegacy + */ +#define NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE 16 + +/** + * PCI information about a GPU device. 
+ */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int domain; //!< The PCI domain on which the device's bus resides, 0 to 0xffffffff + unsigned int bus; //!< The bus on which the device resides, 0 to 0xff + unsigned int device; //!< The device's id on the bus, 0 to 31 + + unsigned int pciDeviceId; //!< The combined 16-bit device id and 16-bit vendor id + unsigned int pciSubSystemId; //!< The 32-bit Sub System Device ID + + unsigned int baseClass; //!< The 8-bit PCI base class code + unsigned int subClass; //!< The 8-bit PCI sub class code + + char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE]; //!< The tuple domain:bus:device.function PCI identifier (& NULL terminator) +} nvmlPciInfoExt_v1_t; +typedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t; +#define nvmlPciInfoExt_v1 NVML_STRUCT_VERSION(PciInfoExt, 1) + +/** + * PCI information about a GPU device. + */ +typedef struct nvmlPciInfo_st +{ + char busIdLegacy[NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE]; //!< The legacy tuple domain:bus:device.function PCI identifier (& NULL terminator) + unsigned int domain; //!< The PCI domain on which the device's bus resides, 0 to 0xffffffff + unsigned int bus; //!< The bus on which the device resides, 0 to 0xff + unsigned int device; //!< The device's id on the bus, 0 to 31 + unsigned int pciDeviceId; //!< The combined 16-bit device id and 16-bit vendor id + + // Added in NVML 2.285 API + unsigned int pciSubSystemId; //!< The 32-bit Sub System Device ID + + char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE]; //!< The tuple domain:bus:device.function PCI identifier (& NULL terminator) +} nvmlPciInfo_t; + +/** + * PCI format string for ::busIdLegacy + */ +#define NVML_DEVICE_PCI_BUS_ID_LEGACY_FMT "%04X:%02X:%02X.0" + +/** + * PCI format string for ::busId + */ +#define NVML_DEVICE_PCI_BUS_ID_FMT "%08X:%02X:%02X.0" + +/** + * Utility macro for filling the pci bus id format from a nvmlPciInfo_t + */ +#define NVML_DEVICE_PCI_BUS_ID_FMT_ARGS(pciInfo) (pciInfo)->domain, \ + (pciInfo)->bus, \ + (pciInfo)->device + +/** + * Detailed ECC error counts for a device. + * + * @deprecated Different GPU families can have different memory error counters + * See \ref nvmlDeviceGetMemoryErrorCounter + */ +typedef struct nvmlEccErrorCounts_st +{ + unsigned long long l1Cache; //!< L1 cache errors + unsigned long long l2Cache; //!< L2 cache errors + unsigned long long deviceMemory; //!< Device memory errors + unsigned long long registerFile; //!< Register file errors +} nvmlEccErrorCounts_t; + +/** + * Utilization information for a device. + * Each sample period may be between 1 second and 1/6 second, depending on the product being queried. + */ +typedef struct nvmlUtilization_st +{ + unsigned int gpu; //!< Percent of time over the past sample period during which one or more kernels was executing on the GPU + unsigned int memory; //!< Percent of time over the past sample period during which global (device) memory was being read or written +} nvmlUtilization_t; + +/** + * Memory allocation information for a device (v1). + * The total amount is equal to the sum of the amounts of free and used memory. + */ +typedef struct nvmlMemory_st +{ + unsigned long long total; //!< Total physical device memory (in bytes) + unsigned long long free; //!< Unallocated device memory (in bytes) + unsigned long long used; //!< Sum of Reserved and Allocated device memory (in bytes). 
+ //!< Note that the driver/GPU always sets aside a small amount of memory for bookkeeping +} nvmlMemory_t; + +/** + * Memory allocation information for a device (v2). + * + * Version 2 adds versioning for the struct and the amount of system-reserved memory as an output. + */ +typedef struct nvmlMemory_v2_st +{ + unsigned int version; //!< Structure format version (must be 2) + unsigned long long total; //!< Total physical device memory (in bytes) + unsigned long long reserved; //!< Device memory (in bytes) reserved for system use (driver or firmware) + unsigned long long free; //!< Unallocated device memory (in bytes) + unsigned long long used; //!< Allocated device memory (in bytes). +} nvmlMemory_v2_t; + +#define nvmlMemory_v2 NVML_STRUCT_VERSION(Memory, 2) + +/** + * BAR1 Memory allocation Information for a device + */ +typedef struct nvmlBAR1Memory_st +{ + unsigned long long bar1Total; //!< Total BAR1 Memory (in bytes) + unsigned long long bar1Free; //!< Unallocated BAR1 Memory (in bytes) + unsigned long long bar1Used; //!< Allocated Used Memory (in bytes) +}nvmlBAR1Memory_t; + +/** + * Information about running compute processes on the GPU, legacy version + * for older versions of the API. + */ +typedef struct nvmlProcessInfo_v1_st +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver +} nvmlProcessInfo_v1_t; + +/** + * Information about running compute processes on the GPU + */ +typedef struct nvmlProcessInfo_v2_st +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver + unsigned int gpuInstanceId; //!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is set to + // 0xFFFFFFFF otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId is set to + // 0xFFFFFFFF otherwise. +} nvmlProcessInfo_v2_t, nvmlProcessInfo_t; + +/** + * Information about running process on the GPU with protected memory + */ +typedef struct +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver + unsigned int gpuInstanceId; //!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is + // set to 0xFFFFFFFF otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId + // is set to 0xFFFFFFFF otherwise. + unsigned long long usedGpuCcProtectedMemory; //!< Amount of used GPU conf compute protected memory in bytes. 
+} nvmlProcessDetail_v1_t; + +/** + * Information about all running processes on the GPU for the given mode + */ +typedef struct +{ + unsigned int version; //!< Struct version, MUST be nvmlProcessDetailList_v1 + unsigned int mode; //!< Process mode(Compute/Graphics/MPSCompute) + unsigned int numProcArrayEntries; //!< Number of process entries in procArray + nvmlProcessDetail_v1_t *procArray; //!< Process array +} nvmlProcessDetailList_v1_t; + +typedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t; + +/** + * nvmlProcessDetailList version + */ +#define nvmlProcessDetailList_v1 NVML_STRUCT_VERSION(ProcessDetailList, 1) + +typedef struct nvmlDeviceAttributes_st +{ + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count + unsigned int gpuInstanceSliceCount; //!< GPU instance slice count + unsigned int computeInstanceSliceCount; //!< Compute instance slice count + unsigned long long memorySizeMB; //!< Device memory size (in MiB) +} nvmlDeviceAttributes_t; + +/** + * C2C Mode information for a device + */ +typedef struct +{ + unsigned int isC2cEnabled; +} nvmlC2cModeInfo_v1_t; + +#define nvmlC2cModeInfo_v1 NVML_STRUCT_VERSION(C2cModeInfo, 1) + +/** + * Possible values that classify the remap availability for each bank. The max + * field will contain the number of banks that have maximum remap availability + * (all reserved rows are available). None means that there are no reserved + * rows available. + */ +typedef struct nvmlRowRemapperHistogramValues_st +{ + unsigned int max; + unsigned int high; + unsigned int partial; + unsigned int low; + unsigned int none; +} nvmlRowRemapperHistogramValues_t; + +/** + * Enum to represent type of bridge chip + */ +typedef enum nvmlBridgeChipType_enum +{ + NVML_BRIDGE_CHIP_PLX = 0, + NVML_BRIDGE_CHIP_BRO4 = 1 +}nvmlBridgeChipType_t; + +/** + * Maximum number of NvLink links supported + */ +#define NVML_NVLINK_MAX_LINKS 18 + +/** + * Enum to represent the NvLink utilization counter packet units + */ +typedef enum nvmlNvLinkUtilizationCountUnits_enum +{ + NVML_NVLINK_COUNTER_UNIT_CYCLES = 0, // count by cycles + NVML_NVLINK_COUNTER_UNIT_PACKETS = 1, // count by packets + NVML_NVLINK_COUNTER_UNIT_BYTES = 2, // count by bytes + NVML_NVLINK_COUNTER_UNIT_RESERVED = 3, // count reserved for internal use + // this must be last + NVML_NVLINK_COUNTER_UNIT_COUNT +} nvmlNvLinkUtilizationCountUnits_t; + +/** + * Enum to represent the NvLink utilization counter packet types to count + * ** this is ONLY applicable with the units as packets or bytes + * ** as specified in \a nvmlNvLinkUtilizationCountUnits_t + * ** all packet filter descriptions are target GPU centric + * ** these can be "OR'd" together + */ +typedef enum nvmlNvLinkUtilizationCountPktTypes_enum +{ + NVML_NVLINK_COUNTER_PKTFILTER_NOP = 0x1, // no operation packets + NVML_NVLINK_COUNTER_PKTFILTER_READ = 0x2, // read packets + NVML_NVLINK_COUNTER_PKTFILTER_WRITE = 0x4, // write packets + NVML_NVLINK_COUNTER_PKTFILTER_RATOM = 0x8, // reduction atomic requests + NVML_NVLINK_COUNTER_PKTFILTER_NRATOM = 0x10, // non-reduction atomic requests + NVML_NVLINK_COUNTER_PKTFILTER_FLUSH = 0x20, // flush requests + NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA = 0x40, // responses with data 
+ NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80, // responses without data + NVML_NVLINK_COUNTER_PKTFILTER_ALL = 0xFF // all packets +} nvmlNvLinkUtilizationCountPktTypes_t; + +/** + * Struct to define the NVLINK counter controls + */ +typedef struct nvmlNvLinkUtilizationControl_st +{ + nvmlNvLinkUtilizationCountUnits_t units; + nvmlNvLinkUtilizationCountPktTypes_t pktfilter; +} nvmlNvLinkUtilizationControl_t; + +/** + * Enum to represent NvLink queryable capabilities + */ +typedef enum nvmlNvLinkCapability_enum +{ + NVML_NVLINK_CAP_P2P_SUPPORTED = 0, // P2P over NVLink is supported + NVML_NVLINK_CAP_SYSMEM_ACCESS = 1, // Access to system memory is supported + NVML_NVLINK_CAP_P2P_ATOMICS = 2, // P2P atomics are supported + NVML_NVLINK_CAP_SYSMEM_ATOMICS= 3, // System memory atomics are supported + NVML_NVLINK_CAP_SLI_BRIDGE = 4, // SLI is supported over this link + NVML_NVLINK_CAP_VALID = 5, // Link is supported on this device + // should be last + NVML_NVLINK_CAP_COUNT +} nvmlNvLinkCapability_t; + +/** + * Enum to represent NvLink queryable error counters + */ +typedef enum nvmlNvLinkErrorCounter_enum +{ + NVML_NVLINK_ERROR_DL_REPLAY = 0, // Data link transmit replay error counter + NVML_NVLINK_ERROR_DL_RECOVERY = 1, // Data link transmit recovery error counter + NVML_NVLINK_ERROR_DL_CRC_FLIT = 2, // Data link receive flow control digit CRC error counter + NVML_NVLINK_ERROR_DL_CRC_DATA = 3, // Data link receive data CRC error counter + NVML_NVLINK_ERROR_DL_ECC_DATA = 4, // Data link receive data ECC error counter + + // this must be last + NVML_NVLINK_ERROR_COUNT +} nvmlNvLinkErrorCounter_t; + +/** + * Enum to represent NvLink's remote device type + */ +typedef enum nvmlIntNvLinkDeviceType_enum +{ + NVML_NVLINK_DEVICE_TYPE_GPU = 0x00, + NVML_NVLINK_DEVICE_TYPE_IBMNPU = 0x01, + NVML_NVLINK_DEVICE_TYPE_SWITCH = 0x02, + NVML_NVLINK_DEVICE_TYPE_UNKNOWN = 0xFF +} nvmlIntNvLinkDeviceType_t; + +/** + * Represents level relationships within a system between two GPUs + * The enums are spaced to allow for future relationships + */ +typedef enum nvmlGpuLevel_enum +{ + NVML_TOPOLOGY_INTERNAL = 0, // e.g. Tesla K80 + NVML_TOPOLOGY_SINGLE = 10, // all devices that only need traverse a single PCIe switch + NVML_TOPOLOGY_MULTIPLE = 20, // all devices that need not traverse a host bridge + NVML_TOPOLOGY_HOSTBRIDGE = 30, // all devices that are connected to the same host bridge + NVML_TOPOLOGY_NODE = 40, // all devices that are connected to the same NUMA node but possibly multiple host bridges + NVML_TOPOLOGY_SYSTEM = 50 // all devices in the system + + // there is purposefully no COUNT here because of the need for spacing above +} nvmlGpuTopologyLevel_t; + +/* Compatibility for CPU->NODE renaming */ +#define NVML_TOPOLOGY_CPU NVML_TOPOLOGY_NODE + +/* P2P Capability Index Status*/ +typedef enum nvmlGpuP2PStatus_enum +{ + NVML_P2P_STATUS_OK = 0, + NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED, + NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED, + NVML_P2P_STATUS_GPU_NOT_SUPPORTED, + NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED, + NVML_P2P_STATUS_DISABLED_BY_REGKEY, + NVML_P2P_STATUS_NOT_SUPPORTED, + NVML_P2P_STATUS_UNKNOWN + +} nvmlGpuP2PStatus_t; + +/* P2P Capability Index*/ +typedef enum nvmlGpuP2PCapsIndex_enum +{ + NVML_P2P_CAPS_INDEX_READ = 0, + NVML_P2P_CAPS_INDEX_WRITE = 1, + NVML_P2P_CAPS_INDEX_NVLINK = 2, + NVML_P2P_CAPS_INDEX_ATOMICS = 3, + NVML_P2P_CAPS_INDEX_PCI = 4, + /* + * DO NOT USE! NVML_P2P_CAPS_INDEX_PROP is deprecated. + * Use NVML_P2P_CAPS_INDEX_PCI instead. 
+ */ + NVML_P2P_CAPS_INDEX_PROP = NVML_P2P_CAPS_INDEX_PCI, + NVML_P2P_CAPS_INDEX_UNKNOWN = 5, +}nvmlGpuP2PCapsIndex_t; + +/** + * Maximum limit on Physical Bridges per Board + */ +#define NVML_MAX_PHYSICAL_BRIDGE (128) + +/** + * Information about the Bridge Chip Firmware + */ +typedef struct nvmlBridgeChipInfo_st +{ + nvmlBridgeChipType_t type; //!< Type of Bridge Chip + unsigned int fwVersion; //!< Firmware Version. 0=Version is unavailable +}nvmlBridgeChipInfo_t; + +/** + * This structure stores the complete Hierarchy of the Bridge Chip within the board. The immediate + * bridge is stored at index 0 of bridgeInfoList, parent to immediate bridge is at index 1 and so forth. + */ +typedef struct nvmlBridgeChipHierarchy_st +{ + unsigned char bridgeCount; //!< Number of Bridge Chips on the Board + nvmlBridgeChipInfo_t bridgeChipInfo[NVML_MAX_PHYSICAL_BRIDGE]; //!< Hierarchy of Bridge Chips on the board +}nvmlBridgeChipHierarchy_t; + +/** + * Represents Type of Sampling Event + */ +typedef enum nvmlSamplingType_enum +{ + NVML_TOTAL_POWER_SAMPLES = 0, //!< To represent total power drawn by GPU + NVML_GPU_UTILIZATION_SAMPLES = 1, //!< To represent percent of time during which one or more kernels was executing on the GPU + NVML_MEMORY_UTILIZATION_SAMPLES = 2, //!< To represent percent of time during which global (device) memory was being read or written + NVML_ENC_UTILIZATION_SAMPLES = 3, //!< To represent percent of time during which NVENC remains busy + NVML_DEC_UTILIZATION_SAMPLES = 4, //!< To represent percent of time during which NVDEC remains busy + NVML_PROCESSOR_CLK_SAMPLES = 5, //!< To represent processor clock samples + NVML_MEMORY_CLK_SAMPLES = 6, //!< To represent memory clock samples + NVML_MODULE_POWER_SAMPLES = 7, //!< To represent module power samples for total module starting Grace Hopper + NVML_JPG_UTILIZATION_SAMPLES = 8, //!< To represent percent of time during which NVJPG remains busy + NVML_OFA_UTILIZATION_SAMPLES = 9, //!< To represent percent of time during which NVOFA remains busy + + // Keep this last + NVML_SAMPLINGTYPE_COUNT +}nvmlSamplingType_t; + +/** + * Represents the queryable PCIe utilization counters + */ +typedef enum nvmlPcieUtilCounter_enum +{ + NVML_PCIE_UTIL_TX_BYTES = 0, // 1KB granularity + NVML_PCIE_UTIL_RX_BYTES = 1, // 1KB granularity + + // Keep this last + NVML_PCIE_UTIL_COUNT +} nvmlPcieUtilCounter_t; + +/** + * Represents the type for sample value returned + */ +typedef enum nvmlValueType_enum +{ + NVML_VALUE_TYPE_DOUBLE = 0, + NVML_VALUE_TYPE_UNSIGNED_INT = 1, + NVML_VALUE_TYPE_UNSIGNED_LONG = 2, + NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3, + NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4, + NVML_VALUE_TYPE_SIGNED_INT = 5, + + // Keep this last + NVML_VALUE_TYPE_COUNT +}nvmlValueType_t; + + +/** + * Union to represent different types of Value + */ +typedef union nvmlValue_st +{ + double dVal; //!< If the value is double + int siVal; //!< If the value is signed int + unsigned int uiVal; //!< If the value is unsigned int + unsigned long ulVal; //!< If the value is unsigned long + unsigned long long ullVal; //!< If the value is unsigned long long + signed long long sllVal; //!< If the value is signed long long +}nvmlValue_t; + +/** + * Information for Sample + */ +typedef struct nvmlSample_st +{ + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlValue_t sampleValue; //!< Sample Value +}nvmlSample_t; + +/** + * Represents type of perf policy for which violation times can be queried + */ +typedef enum nvmlPerfPolicyType_enum +{ + 
NVML_PERF_POLICY_POWER = 0, //!< How long did power violations cause the GPU to be below application clocks + NVML_PERF_POLICY_THERMAL = 1, //!< How long did thermal violations cause the GPU to be below application clocks + NVML_PERF_POLICY_SYNC_BOOST = 2, //!< How long did sync boost cause the GPU to be below application clocks + NVML_PERF_POLICY_BOARD_LIMIT = 3, //!< How long did the board limit cause the GPU to be below application clocks + NVML_PERF_POLICY_LOW_UTILIZATION = 4, //!< How long did low utilization cause the GPU to be below application clocks + NVML_PERF_POLICY_RELIABILITY = 5, //!< How long did the board reliability limit cause the GPU to be below application clocks + + NVML_PERF_POLICY_TOTAL_APP_CLOCKS = 10, //!< Total time the GPU was held below application clocks by any limiter (0 - 5 above) + NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11, //!< Total time the GPU was held below base clocks + + // Keep this last + NVML_PERF_POLICY_COUNT +}nvmlPerfPolicyType_t; + +/** + * Struct to hold perf policy violation status data + */ +typedef struct nvmlViolationTime_st +{ + unsigned long long referenceTime; //!< referenceTime represents CPU timestamp in microseconds + unsigned long long violationTime; //!< violationTime in Nanoseconds +}nvmlViolationTime_t; + +#define NVML_MAX_THERMAL_SENSORS_PER_GPU 3 + +typedef enum +{ + NVML_THERMAL_TARGET_NONE = 0, + NVML_THERMAL_TARGET_GPU = 1, //!< GPU core temperature requires NvPhysicalGpuHandle + NVML_THERMAL_TARGET_MEMORY = 2, //!< GPU memory temperature requires NvPhysicalGpuHandle + NVML_THERMAL_TARGET_POWER_SUPPLY = 4, //!< GPU power supply temperature requires NvPhysicalGpuHandle + NVML_THERMAL_TARGET_BOARD = 8, //!< GPU board ambient temperature requires NvPhysicalGpuHandle + NVML_THERMAL_TARGET_VCD_BOARD = 9, //!< Visual Computing Device Board temperature requires NvVisualComputingDeviceHandle + NVML_THERMAL_TARGET_VCD_INLET = 10, //!< Visual Computing Device Inlet temperature requires NvVisualComputingDeviceHandle + NVML_THERMAL_TARGET_VCD_OUTLET = 11, //!< Visual Computing Device Outlet temperature requires NvVisualComputingDeviceHandle + + NVML_THERMAL_TARGET_ALL = 15, + NVML_THERMAL_TARGET_UNKNOWN = -1, +} nvmlThermalTarget_t; + +typedef enum +{ + NVML_THERMAL_CONTROLLER_NONE = 0, + NVML_THERMAL_CONTROLLER_GPU_INTERNAL, + NVML_THERMAL_CONTROLLER_ADM1032, + NVML_THERMAL_CONTROLLER_ADT7461, + NVML_THERMAL_CONTROLLER_MAX6649, + NVML_THERMAL_CONTROLLER_MAX1617, + NVML_THERMAL_CONTROLLER_LM99, + NVML_THERMAL_CONTROLLER_LM89, + NVML_THERMAL_CONTROLLER_LM64, + NVML_THERMAL_CONTROLLER_G781, + NVML_THERMAL_CONTROLLER_ADT7473, + NVML_THERMAL_CONTROLLER_SBMAX6649, + NVML_THERMAL_CONTROLLER_VBIOSEVT, + NVML_THERMAL_CONTROLLER_OS, + NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS, + NVML_THERMAL_CONTROLLER_NVSYSCON_E551, + NVML_THERMAL_CONTROLLER_MAX6649R, + NVML_THERMAL_CONTROLLER_ADT7473S, + NVML_THERMAL_CONTROLLER_UNKNOWN = -1, +} nvmlThermalController_t; + +typedef struct { + nvmlThermalController_t controller; + int defaultMinTemp; + int defaultMaxTemp; + int currentTemp; + nvmlThermalTarget_t target; +} nvmlGpuThermalSettingsSensor_t; + +typedef struct +{ + unsigned int count; + nvmlGpuThermalSettingsSensor_t sensor[NVML_MAX_THERMAL_SENSORS_PER_GPU]; + +} nvmlGpuThermalSettings_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceEnumvs Device Enums + * @{ + */ 
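The versioned structs defined in the group above (for example nvmlMemory_v2_t, whose version field must carry nvmlMemory_v2 before the call) surface in the Go wrapper as _v2 accessors that populate the version field on the caller's behalf. A short sketch of that convention, assuming the Device.GetMemoryInfo_v2 accessor exposed by recent go-nvml releases (illustrative only, not part of this header):

// Sketch: read the versioned memory struct (nvmlMemory_v2_t) via the Go wrapper.
// Assumes github.com/NVIDIA/go-nvml/pkg/nvml as vendored by this change.
package example

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func printMemoryInfo(device nvml.Device) error {
	mem, ret := device.GetMemoryInfo_v2() // the wrapper fills mem.Version for us
	if ret != nvml.SUCCESS {
		// Drivers that predate the v2 struct reject the versioned query; the
		// unversioned GetMemoryInfo (nvmlMemory_t) remains available as a fallback.
		return fmt.Errorf("memory info (v2): %v", nvml.ErrorString(ret))
	}
	fmt.Printf("total=%d reserved=%d used=%d free=%d (bytes)\n",
		mem.Total, mem.Reserved, mem.Used, mem.Free)
	return nil
}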
+/***************************************************************************************************/ + +/** + * Generic enable/disable enum. + */ +typedef enum nvmlEnableState_enum +{ + NVML_FEATURE_DISABLED = 0, //!< Feature disabled + NVML_FEATURE_ENABLED = 1 //!< Feature enabled +} nvmlEnableState_t; + +//! Generic flag used to specify the default behavior of some functions. See description of particular functions for details. +#define nvmlFlagDefault 0x00 +//! Generic flag used to force some behavior. See description of particular functions for details. +#define nvmlFlagForce 0x01 + +/** + * * The Brand of the GPU + * */ +typedef enum nvmlBrandType_enum +{ + NVML_BRAND_UNKNOWN = 0, + NVML_BRAND_QUADRO = 1, + NVML_BRAND_TESLA = 2, + NVML_BRAND_NVS = 3, + NVML_BRAND_GRID = 4, // Deprecated from API reporting. Keeping definition for backward compatibility. + NVML_BRAND_GEFORCE = 5, + NVML_BRAND_TITAN = 6, + NVML_BRAND_NVIDIA_VAPPS = 7, // NVIDIA Virtual Applications + NVML_BRAND_NVIDIA_VPC = 8, // NVIDIA Virtual PC + NVML_BRAND_NVIDIA_VCS = 9, // NVIDIA Virtual Compute Server + NVML_BRAND_NVIDIA_VWS = 10, // NVIDIA RTX Virtual Workstation + NVML_BRAND_NVIDIA_CLOUD_GAMING = 11, // NVIDIA Cloud Gaming + NVML_BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING, // Deprecated from API reporting. Keeping definition for backward compatibility. + NVML_BRAND_QUADRO_RTX = 12, + NVML_BRAND_NVIDIA_RTX = 13, + NVML_BRAND_NVIDIA = 14, + NVML_BRAND_GEFORCE_RTX = 15, // Unused + NVML_BRAND_TITAN_RTX = 16, // Unused + + // Keep this last + NVML_BRAND_COUNT +} nvmlBrandType_t; + +/** + * Temperature thresholds. + */ +typedef enum nvmlTemperatureThresholds_enum +{ + NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0, // Temperature at which the GPU will + // shut down for HW protection + NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1, // Temperature at which the GPU will + // begin HW slowdown + NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2, // Memory Temperature at which the GPU will + // begin SW slowdown + NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3, // GPU Temperature at which the GPU + // can be throttled below base clock + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = 4, // Minimum GPU Temperature that can be + // set as acoustic threshold + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = 5, // Current temperature that is set as + // acoustic threshold. + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = 6, // Maximum GPU temperature that can be + // set as acoustic threshold. + // Keep this last + NVML_TEMPERATURE_THRESHOLD_COUNT +} nvmlTemperatureThresholds_t; + +/** + * Temperature sensors. + */ +typedef enum nvmlTemperatureSensors_enum +{ + NVML_TEMPERATURE_GPU = 0, //!< Temperature sensor for the GPU die + + // Keep this last + NVML_TEMPERATURE_COUNT +} nvmlTemperatureSensors_t; + +/** + * Compute mode. + * + * NVML_COMPUTEMODE_EXCLUSIVE_PROCESS was added in CUDA 4.0. + * Earlier CUDA versions supported a single exclusive mode, + * which is equivalent to NVML_COMPUTEMODE_EXCLUSIVE_THREAD in CUDA 4.0 and beyond. 
+ */ +typedef enum nvmlComputeMode_enum +{ + NVML_COMPUTEMODE_DEFAULT = 0, //!< Default compute mode -- multiple contexts per device + NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1, //!< Support Removed + NVML_COMPUTEMODE_PROHIBITED = 2, //!< Compute-prohibited mode -- no contexts per device + NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3, //!< Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time + + // Keep this last + NVML_COMPUTEMODE_COUNT +} nvmlComputeMode_t; + +/** + * Max Clock Monitors available + */ +#define MAX_CLK_DOMAINS 32 + +/** + * Clock Monitor error types + */ +typedef struct nvmlClkMonFaultInfo_struct { + /** + * The Domain which faulted + */ + unsigned int clkApiDomain; + + /** + * Faults Information + */ + unsigned int clkDomainFaultMask; +} nvmlClkMonFaultInfo_t; + +/** + * Clock Monitor Status + */ +typedef struct nvmlClkMonStatus_status { + /** + * Fault status Indicator + */ + unsigned int bGlobalStatus; + + /** + * Total faulted domain numbers + */ + unsigned int clkMonListSize; + + /** + * The fault Information structure + */ + nvmlClkMonFaultInfo_t clkMonList[MAX_CLK_DOMAINS]; +} nvmlClkMonStatus_t; + +/** + * ECC bit types. + * + * @deprecated See \ref nvmlMemoryErrorType_t for a more flexible type + */ +#define nvmlEccBitType_t nvmlMemoryErrorType_t + +/** + * Single bit ECC errors + * + * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_CORRECTED + */ +#define NVML_SINGLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_CORRECTED + +/** + * Double bit ECC errors + * + * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_UNCORRECTED + */ +#define NVML_DOUBLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_UNCORRECTED + +/** + * Memory error types + */ +typedef enum nvmlMemoryErrorType_enum +{ + /** + * A memory error that was corrected + * + * For ECC errors, these are single bit errors + * For Texture memory, these are errors fixed by resend + */ + NVML_MEMORY_ERROR_TYPE_CORRECTED = 0, + /** + * A memory error that was not corrected + * + * For ECC errors, these are double bit errors + * For Texture memory, these are errors where the resend fails + */ + NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1, + + + // Keep this last + NVML_MEMORY_ERROR_TYPE_COUNT //!< Count of memory error types + +} nvmlMemoryErrorType_t; + +/** + * ECC counter types. + * + * Note: Volatile counts are reset each time the driver loads. On Windows this is once per boot. On Linux this can be more frequent. + * On Linux the driver unloads when no active clients exist. If persistence mode is enabled or there is always a driver + * client active (e.g. X11), then Linux also sees per-boot behavior. If not, volatile counts are reset each time a compute app + * is run. + */ +typedef enum nvmlEccCounterType_enum +{ + NVML_VOLATILE_ECC = 0, //!< Volatile counts are reset each time the driver loads. + NVML_AGGREGATE_ECC = 1, //!< Aggregate counts persist across reboots (i.e. for the lifetime of the device) + + // Keep this last + NVML_ECC_COUNTER_TYPE_COUNT //!< Count of memory counter types +} nvmlEccCounterType_t; + +/** + * Clock types. + * + * All speeds are in Mhz. + */ +typedef enum nvmlClockType_enum +{ + NVML_CLOCK_GRAPHICS = 0, //!< Graphics clock domain + NVML_CLOCK_SM = 1, //!< SM clock domain + NVML_CLOCK_MEM = 2, //!< Memory clock domain + NVML_CLOCK_VIDEO = 3, //!< Video encoder/decoder clock domain + + // Keep this last + NVML_CLOCK_COUNT //!< Count of clock types +} nvmlClockType_t; + +/** + * Clock Ids. These are used in combination with nvmlClockType_t + * to specify a single clock value. 
+ */ +typedef enum nvmlClockId_enum +{ + NVML_CLOCK_ID_CURRENT = 0, //!< Current actual clock value + NVML_CLOCK_ID_APP_CLOCK_TARGET = 1, //!< Target application clock + NVML_CLOCK_ID_APP_CLOCK_DEFAULT = 2, //!< Default application clock target + NVML_CLOCK_ID_CUSTOMER_BOOST_MAX = 3, //!< OEM-defined maximum clock rate + + //Keep this last + NVML_CLOCK_ID_COUNT //!< Count of Clock Ids. +} nvmlClockId_t; + +/** + * Driver models. + * + * Windows only. + */ + +typedef enum nvmlDriverModel_enum +{ + NVML_DRIVER_WDDM = 0, //!< WDDM driver model -- GPU treated as a display device + NVML_DRIVER_WDM = 1 //!< WDM (TCC) model (recommended) -- GPU treated as a generic device +} nvmlDriverModel_t; + +#define NVML_MAX_GPU_PERF_PSTATES 16 + +/** + * Allowed PStates. + */ +typedef enum nvmlPStates_enum +{ + NVML_PSTATE_0 = 0, //!< Performance state 0 -- Maximum Performance + NVML_PSTATE_1 = 1, //!< Performance state 1 + NVML_PSTATE_2 = 2, //!< Performance state 2 + NVML_PSTATE_3 = 3, //!< Performance state 3 + NVML_PSTATE_4 = 4, //!< Performance state 4 + NVML_PSTATE_5 = 5, //!< Performance state 5 + NVML_PSTATE_6 = 6, //!< Performance state 6 + NVML_PSTATE_7 = 7, //!< Performance state 7 + NVML_PSTATE_8 = 8, //!< Performance state 8 + NVML_PSTATE_9 = 9, //!< Performance state 9 + NVML_PSTATE_10 = 10, //!< Performance state 10 + NVML_PSTATE_11 = 11, //!< Performance state 11 + NVML_PSTATE_12 = 12, //!< Performance state 12 + NVML_PSTATE_13 = 13, //!< Performance state 13 + NVML_PSTATE_14 = 14, //!< Performance state 14 + NVML_PSTATE_15 = 15, //!< Performance state 15 -- Minimum Performance + NVML_PSTATE_UNKNOWN = 32 //!< Unknown performance state +} nvmlPstates_t; + +/** + * GPU Operation Mode + * + * GOM allows to reduce power usage and optimize GPU throughput by disabling GPU features. + * + * Each GOM is designed to meet specific user needs. + */ +typedef enum nvmlGom_enum +{ + NVML_GOM_ALL_ON = 0, //!< Everything is enabled and running at full speed + + NVML_GOM_COMPUTE = 1, //!< Designed for running only compute tasks. Graphics operations + //!< are not allowed + + NVML_GOM_LOW_DP = 2 //!< Designed for running graphics applications that don't require + //!< high bandwidth double precision +} nvmlGpuOperationMode_t; + +/** + * Available infoROM objects. + */ +typedef enum nvmlInforomObject_enum +{ + NVML_INFOROM_OEM = 0, //!< An object defined by OEM + NVML_INFOROM_ECC = 1, //!< The ECC object determining the level of ECC support + NVML_INFOROM_POWER = 2, //!< The power management object + + // Keep this last + NVML_INFOROM_COUNT //!< This counts the number of infoROM objects the driver knows about +} nvmlInforomObject_t; + +/** + * Return values for NVML API calls. 
+ */ +typedef enum nvmlReturn_enum +{ + // cppcheck-suppress * + NVML_SUCCESS = 0, //!< The operation was successful + NVML_ERROR_UNINITIALIZED = 1, //!< NVML was not first initialized with nvmlInit() + NVML_ERROR_INVALID_ARGUMENT = 2, //!< A supplied argument is invalid + NVML_ERROR_NOT_SUPPORTED = 3, //!< The requested operation is not available on target device + NVML_ERROR_NO_PERMISSION = 4, //!< The current user does not have permission for operation + NVML_ERROR_ALREADY_INITIALIZED = 5, //!< Deprecated: Multiple initializations are now allowed through ref counting + NVML_ERROR_NOT_FOUND = 6, //!< A query to find an object was unsuccessful + NVML_ERROR_INSUFFICIENT_SIZE = 7, //!< An input argument is not large enough + NVML_ERROR_INSUFFICIENT_POWER = 8, //!< A device's external power cables are not properly attached + NVML_ERROR_DRIVER_NOT_LOADED = 9, //!< NVIDIA driver is not loaded + NVML_ERROR_TIMEOUT = 10, //!< User provided timeout passed + NVML_ERROR_IRQ_ISSUE = 11, //!< NVIDIA Kernel detected an interrupt issue with a GPU + NVML_ERROR_LIBRARY_NOT_FOUND = 12, //!< NVML Shared Library couldn't be found or loaded + NVML_ERROR_FUNCTION_NOT_FOUND = 13, //!< Local version of NVML doesn't implement this function + NVML_ERROR_CORRUPTED_INFOROM = 14, //!< infoROM is corrupted + NVML_ERROR_GPU_IS_LOST = 15, //!< The GPU has fallen off the bus or has otherwise become inaccessible + NVML_ERROR_RESET_REQUIRED = 16, //!< The GPU requires a reset before it can be used again + NVML_ERROR_OPERATING_SYSTEM = 17, //!< The GPU control device has been blocked by the operating system/cgroups + NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18, //!< RM detects a driver/library version mismatch + NVML_ERROR_IN_USE = 19, //!< An operation cannot be performed because the GPU is currently in use + NVML_ERROR_MEMORY = 20, //!< Insufficient memory + NVML_ERROR_NO_DATA = 21, //!< No data + NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22, //!< The requested vgpu operation is not available on target device, because ECC is enabled + NVML_ERROR_INSUFFICIENT_RESOURCES = 23, //!< Ran out of critical resources, other than memory + NVML_ERROR_FREQ_NOT_SUPPORTED = 24, //!< The requested frequency is not supported + NVML_ERROR_ARGUMENT_VERSION_MISMATCH = 25, //!< The provided version is invalid/unsupported + NVML_ERROR_DEPRECATED = 26, //!< The requested functionality has been deprecated + NVML_ERROR_NOT_READY = 27, //!< The system is not ready for the request + NVML_ERROR_GPU_NOT_FOUND = 28, //!< No GPUs were found + NVML_ERROR_INVALID_STATE = 29, //!< Resource not in correct state to perform requested operation + NVML_ERROR_UNKNOWN = 999 //!< An internal driver error occurred +} nvmlReturn_t; + +/** + * See \ref nvmlDeviceGetMemoryErrorCounter + */ +typedef enum nvmlMemoryLocation_enum +{ + NVML_MEMORY_LOCATION_L1_CACHE = 0, //!< GPU L1 Cache + NVML_MEMORY_LOCATION_L2_CACHE = 1, //!< GPU L2 Cache + NVML_MEMORY_LOCATION_DRAM = 2, //!< Turing+ DRAM + NVML_MEMORY_LOCATION_DEVICE_MEMORY = 2, //!< GPU Device Memory + NVML_MEMORY_LOCATION_REGISTER_FILE = 3, //!< GPU Register File + NVML_MEMORY_LOCATION_TEXTURE_MEMORY = 4, //!< GPU Texture Memory + NVML_MEMORY_LOCATION_TEXTURE_SHM = 5, //!< Shared memory + NVML_MEMORY_LOCATION_CBU = 6, //!< CBU + NVML_MEMORY_LOCATION_SRAM = 7, //!< Turing+ SRAM + // Keep this last + NVML_MEMORY_LOCATION_COUNT //!< This counts the number of memory locations the driver knows about +} nvmlMemoryLocation_t; + +/** + * Causes for page retirement + */ +typedef enum nvmlPageRetirementCause_enum +{ +
NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS = 0, //!< Page was retired due to multiple single bit ECC error + NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR = 1, //!< Page was retired due to double bit ECC error + + // Keep this last + NVML_PAGE_RETIREMENT_CAUSE_COUNT +} nvmlPageRetirementCause_t; + +/** + * API types that allow changes to default permission restrictions + */ +typedef enum nvmlRestrictedAPI_enum +{ + NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS = 0, //!< APIs that change application clocks, see nvmlDeviceSetApplicationsClocks + //!< and see nvmlDeviceResetApplicationsClocks + NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS = 1, //!< APIs that enable/disable Auto Boosted clocks + //!< see nvmlDeviceSetAutoBoostedClocksEnabled + // Keep this last + NVML_RESTRICTED_API_COUNT +} nvmlRestrictedAPI_t; + +/** @} */ + +/***************************************************************************************************/ +/** @addtogroup virtualGPU + * @{ + */ +/***************************************************************************************************/ +/** @defgroup nvmlVirtualGpuEnums vGPU Enums + * @{ + */ +/***************************************************************************************************/ + +/*! + * GPU virtualization mode types. + */ +typedef enum nvmlGpuVirtualizationMode { + NVML_GPU_VIRTUALIZATION_MODE_NONE = 0, //!< Represents Bare Metal GPU + NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH = 1, //!< Device is associated with GPU-Passthorugh + NVML_GPU_VIRTUALIZATION_MODE_VGPU = 2, //!< Device is associated with vGPU inside virtual machine. + NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU = 3, //!< Device is associated with VGX hypervisor in vGPU mode + NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4 //!< Device is associated with VGX hypervisor in vSGA mode +} nvmlGpuVirtualizationMode_t; + +/** + * Host vGPU modes + */ +typedef enum nvmlHostVgpuMode_enum +{ + NVML_HOST_VGPU_MODE_NON_SRIOV = 0, //!< Non SR-IOV mode + NVML_HOST_VGPU_MODE_SRIOV = 1 //!< SR-IOV mode +} nvmlHostVgpuMode_t; + +/*! + * Types of VM identifiers + */ +typedef enum nvmlVgpuVmIdType { + NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID + NVML_VGPU_VM_ID_UUID = 1 //!< VM ID represents UUID +} nvmlVgpuVmIdType_t; + +/** + * vGPU GUEST info state + */ +typedef enum nvmlVgpuGuestInfoState_enum +{ + NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //!< Guest-dependent fields uninitialized + NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1 //!< Guest-dependent fields initialized +} nvmlVgpuGuestInfoState_t; + +/** + * vGPU software licensable features + */ +typedef enum { + NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN = 0, //!< Unknown + NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1, //!< Virtual GPU + NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX = 2, //!< Nvidia RTX + NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX, //!< Deprecated, do not use. 
+ NVML_GRID_LICENSE_FEATURE_CODE_GAMING = 3, //!< Gaming + NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE = 4 //!< Compute +} nvmlGridLicenseFeatureCode_t; + +/** + * Status codes for license expiry + */ +#define NVML_GRID_LICENSE_EXPIRY_NOT_AVAILABLE 0 //!< Expiry information not available +#define NVML_GRID_LICENSE_EXPIRY_INVALID 1 //!< Invalid expiry or error fetching expiry +#define NVML_GRID_LICENSE_EXPIRY_VALID 2 //!< Valid expiry +#define NVML_GRID_LICENSE_EXPIRY_NOT_APPLICABLE 3 //!< Expiry not applicable +#define NVML_GRID_LICENSE_EXPIRY_PERMANENT 4 //!< Permanent expiry + +/** + * vGPU queryable capabilities + */ +typedef enum nvmlVgpuCapability_enum +{ + NVML_VGPU_CAP_NVLINK_P2P = 0, //!< P2P over NVLink is supported + NVML_VGPU_CAP_GPUDIRECT = 1, //!< GPUDirect capability is supported + NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE = 2, //!< vGPU profile cannot be mixed with other vGPU profiles in same VM + NVML_VGPU_CAP_EXCLUSIVE_TYPE = 3, //!< vGPU profile cannot run on a GPU alongside other profiles of different type + NVML_VGPU_CAP_EXCLUSIVE_SIZE = 4, //!< vGPU profile cannot run on a GPU alongside other profiles of different size + // Keep this last + NVML_VGPU_CAP_COUNT +} nvmlVgpuCapability_t; + +/** +* vGPU driver queryable capabilities +*/ +typedef enum nvmlVgpuDriverCapability_enum +{ + NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = 0, //!< Supports mixing of different vGPU profiles within one guest VM + // Keep this last + NVML_VGPU_DRIVER_CAP_COUNT +} nvmlVgpuDriverCapability_t; + +/** +* Device vGPU queryable capabilities +*/ +typedef enum nvmlDeviceVgpuCapability_enum +{ + NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = 0, //!< Query if the fractional vGPU profiles on this GPU can be used in multi-vGPU configurations + NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = 1, //!< Query if the GPU support concurrent execution of timesliced vGPU profiles of differing types + NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = 2, //!< Query if the GPU support concurrent execution of timesliced vGPU profiles of differing framebuffer sizes + NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = 3, //!< Query the GPU's read_device_buffer expected bandwidth capacity in megabytes per second + NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = 4, //!< Query the GPU's write_device_buffer expected bandwidth capacity in megabytes per second + NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING = 5, //!< Query if vGPU profiles on the GPU supports migration data streaming + NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU = 6, //!< Set/Get support for mini-quarter vGPU profiles + NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = 7, //!< Set/Get support for compute media engine vGPU profiles + // Keep this last + NVML_DEVICE_VGPU_CAP_COUNT +} nvmlDeviceVgpuCapability_t; + +/** @} */ + +/***************************************************************************************************/ + +/** @defgroup nvmlVgpuConstants vGPU Constants + * @{ + */ +/***************************************************************************************************/ + +/** + * Buffer size guaranteed to be large enough for \ref nvmlVgpuTypeGetLicense + */ +#define NVML_GRID_LICENSE_BUFFER_SIZE 128 + +#define NVML_VGPU_NAME_BUFFER_SIZE 64 + +#define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 + +#define INVALID_GPU_INSTANCE_PROFILE_ID 0xFFFFFFFF + +#define INVALID_GPU_INSTANCE_ID 0xFFFFFFFF + +#define NVML_INVALID_VGPU_PLACEMENT_ID 0xFFFF + +/*! + * Macros for vGPU instance's virtualization capabilities bitfield. 
+ */ +#define NVML_VGPU_VIRTUALIZATION_CAP_MIGRATION 0:0 +#define NVML_VGPU_VIRTUALIZATION_CAP_MIGRATION_NO 0x0 +#define NVML_VGPU_VIRTUALIZATION_CAP_MIGRATION_YES 0x1 + +/*! + * Macros for pGPU's virtualization capabilities bitfield. + */ +#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION 0:0 +#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_NO 0x0 +#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_YES 0x1 + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlVgpuStructs vGPU Structs + * @{ + */ +/***************************************************************************************************/ + +typedef unsigned int nvmlVgpuTypeId_t; + +typedef unsigned int nvmlVgpuInstance_t; + +/** + * Structure to store the vGPU heterogeneous mode of device -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int mode; //!< The vGPU heterogeneous mode +} nvmlVgpuHeterogeneousMode_v1_t; +typedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t; +#define nvmlVgpuHeterogeneousMode_v1 NVML_STRUCT_VERSION(VgpuHeterogeneousMode, 1) + +/** + * Structure to store the placement ID of vGPU instance -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int placementId; //!< Placement ID of the active vGPU instance +} nvmlVgpuPlacementId_v1_t; +typedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t; +#define nvmlVgpuPlacementId_v1 NVML_STRUCT_VERSION(VgpuPlacementId, 1) + +/** + * Structure to store the list of vGPU placements -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int placementSize; //!< The number of slots occupied by the vGPU type + unsigned int count; //!< Count of placement IDs fetched + unsigned int *placementIds; //!< Placement IDs for the vGPU type +} nvmlVgpuPlacementList_v1_t; +typedef nvmlVgpuPlacementList_v1_t nvmlVgpuPlacementList_t; +#define nvmlVgpuPlacementList_v1 NVML_STRUCT_VERSION(VgpuPlacementList, 1) + +/** + * Structure to store Utilization Value and vgpuInstance + */ +typedef struct nvmlVgpuInstanceUtilizationSample_st +{ + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlValue_t smUtil; //!< SM (3D/Compute) Util Value + nvmlValue_t memUtil; //!< Frame Buffer Memory Util Value + nvmlValue_t encUtil; //!< Encoder Util Value + nvmlValue_t decUtil; //!< Decoder Util Value +} nvmlVgpuInstanceUtilizationSample_t; + +/** + * Structure to store Utilization Value and vgpuInstance Info -- Version 1 + */ +typedef struct +{ + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + nvmlValue_t smUtil; //!< SM (3D/Compute) Util Value + nvmlValue_t memUtil; //!< Frame Buffer Memory Util Value + nvmlValue_t encUtil; //!< Encoder Util Value + nvmlValue_t decUtil; //!< Decoder Util Value + nvmlValue_t jpgUtil; //!< Jpeg Util Value + nvmlValue_t ofaUtil; //!< Ofa Util Value +} nvmlVgpuInstanceUtilizationInfo_v1_t; + +/** + * Structure to store recent utilization for vGPU instances running on a device -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + nvmlValueType_t sampleValType; //!< Hold the type of returned sample values + unsigned int vgpuInstanceCount; //!< Hold the number of vGPU instances + unsigned long long lastSeenTimeStamp; 
//!< Return only samples with timestamp greater than lastSeenTimeStamp + nvmlVgpuInstanceUtilizationInfo_v1_t *vgpuUtilArray; //!< The array (allocated by caller) in which vGPU utilization are returned +} nvmlVgpuInstancesUtilizationInfo_v1_t; +typedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t; +#define nvmlVgpuInstancesUtilizationInfo_v1 NVML_STRUCT_VERSION(VgpuInstancesUtilizationInfo, 1) + +/** + * Structure to store Utilization Value, vgpuInstance and subprocess information + */ +typedef struct nvmlVgpuProcessUtilizationSample_st +{ + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + unsigned int pid; //!< PID of process running within the vGPU VM + char processName[NVML_VGPU_NAME_BUFFER_SIZE]; //!< Name of process running within the vGPU VM + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value +} nvmlVgpuProcessUtilizationSample_t; + +/** + * Structure to store Utilization Value, vgpuInstance and subprocess information for process running on vGPU instance -- version 1 + */ +typedef struct +{ + char processName[NVML_VGPU_NAME_BUFFER_SIZE]; //!< Name of process running within the vGPU VM + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + unsigned int pid; //!< PID of process running within the vGPU VM + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value + unsigned int jpgUtil; //!< Jpeg Util Value + unsigned int ofaUtil; //!< Ofa Util Value +} nvmlVgpuProcessUtilizationInfo_v1_t; + +/** + * Structure to store recent utilization, vgpuInstance and subprocess information for processes running on vGPU instances active on a device -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int vgpuProcessCount; //!< Hold the number of processes running on vGPU instances + unsigned long long lastSeenTimeStamp; //!< Return only samples with timestamp greater than lastSeenTimeStamp + nvmlVgpuProcessUtilizationInfo_v1_t *vgpuProcUtilArray; //!< The array (allocated by caller) in which utilization of processes running on vGPU instances are returned +} nvmlVgpuProcessesUtilizationInfo_v1_t; +typedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t; +#define nvmlVgpuProcessesUtilizationInfo_v1 NVML_STRUCT_VERSION(VgpuProcessesUtilizationInfo, 1) + +/** + * vGPU scheduler policies + */ +#define NVML_VGPU_SCHEDULER_POLICY_UNKNOWN 0 +#define NVML_VGPU_SCHEDULER_POLICY_BEST_EFFORT 1 +#define NVML_VGPU_SCHEDULER_POLICY_EQUAL_SHARE 2 +#define NVML_VGPU_SCHEDULER_POLICY_FIXED_SHARE 3 + +#define NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT 3 + +#define NVML_SCHEDULER_SW_MAX_LOG_ENTRIES 200 + +#define NVML_VGPU_SCHEDULER_ARR_DEFAULT 0 +#define NVML_VGPU_SCHEDULER_ARR_DISABLE 1 +#define NVML_VGPU_SCHEDULER_ARR_ENABLE 2 + +typedef struct { + unsigned int avgFactor; + unsigned int timeslice; +} nvmlVgpuSchedulerParamsVgpuSchedDataWithARR_t; + +typedef struct { + unsigned int timeslice; +} nvmlVgpuSchedulerParamsVgpuSchedData_t; + +/** + * Union to represent the vGPU Scheduler Parameters + */ +typedef union +{ + nvmlVgpuSchedulerParamsVgpuSchedDataWithARR_t 
vgpuSchedDataWithARR; + + nvmlVgpuSchedulerParamsVgpuSchedData_t vgpuSchedData; + +} nvmlVgpuSchedulerParams_t; + +/** + * Structure to store the state and logs of a software runlist + */ +typedef struct nvmlVgpuSchedulerLogEntries_st +{ + unsigned long long timestamp; //!< Timestamp in ns when this software runlist was preeempted + unsigned long long timeRunTotal; //!< Total time in ns this software runlist has run + unsigned long long timeRun; //!< Time in ns this software runlist ran before preemption + unsigned int swRunlistId; //!< Software runlist Id + unsigned long long targetTimeSlice; //!< The actual timeslice after deduction + unsigned long long cumulativePreemptionTime; //!< Preemption time in ns for this SW runlist +} nvmlVgpuSchedulerLogEntry_t; + +/** + * Structure to store a vGPU software scheduler log + */ +typedef struct nvmlVgpuSchedulerLog_st +{ + unsigned int engineId; //!< Engine whose software runlist log entries are fetched + unsigned int schedulerPolicy; //!< Scheduler policy + unsigned int arrMode; //!< Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*. + nvmlVgpuSchedulerParams_t schedulerParams; + unsigned int entriesCount; //!< Count of log entries fetched + nvmlVgpuSchedulerLogEntry_t logEntries[NVML_SCHEDULER_SW_MAX_LOG_ENTRIES]; +} nvmlVgpuSchedulerLog_t; + +/** + * Structure to store the vGPU scheduler state + */ +typedef struct nvmlVgpuSchedulerGetState_st +{ + unsigned int schedulerPolicy; //!< Scheduler policy + unsigned int arrMode; //!< Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*. + nvmlVgpuSchedulerParams_t schedulerParams; +} nvmlVgpuSchedulerGetState_t; + +typedef struct { + unsigned int avgFactor; + unsigned int frequency; +} nvmlVgpuSchedulerSetParamsVgpuSchedDataWithARR_t; + +typedef struct { + unsigned int timeslice; +} nvmlVgpuSchedulerSetParamsVgpuSchedData_t; + +/** + * Union to represent the vGPU Scheduler set Parameters + */ +typedef union +{ + nvmlVgpuSchedulerSetParamsVgpuSchedDataWithARR_t vgpuSchedDataWithARR; + + nvmlVgpuSchedulerSetParamsVgpuSchedData_t vgpuSchedData; + +} nvmlVgpuSchedulerSetParams_t; + +/** + * Structure to set the vGPU scheduler state + */ +typedef struct nvmlVgpuSchedulerSetState_st +{ + unsigned int schedulerPolicy; //!< Scheduler policy + unsigned int enableARRMode; //!< Adaptive Round Robin scheduler + nvmlVgpuSchedulerSetParams_t schedulerParams; +} nvmlVgpuSchedulerSetState_t; + +/** + * Structure to store the vGPU scheduler capabilities + */ +typedef struct nvmlVgpuSchedulerCapabilities_st +{ + unsigned int supportedSchedulers[NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT]; //!< List the supported vGPU schedulers on the device + unsigned int maxTimeslice; //!< Maximum timeslice value in ns + unsigned int minTimeslice; //!< Minimum timeslice value in ns + unsigned int isArrModeSupported; //!< Flag to check Adaptive Round Robin mode enabled/disabled. 
+ unsigned int maxFrequencyForARR; //!< Maximum frequency for Adaptive Round Robin mode + unsigned int minFrequencyForARR; //!< Minimum frequency for Adaptive Round Robin mode + unsigned int maxAvgFactorForARR; //!< Maximum averaging factor for Adaptive Round Robin mode + unsigned int minAvgFactorForARR; //!< Minimum averaging factor for Adaptive Round Robin mode +} nvmlVgpuSchedulerCapabilities_t; + +/** + * Structure to store the vGPU license expiry details + */ +typedef struct nvmlVgpuLicenseExpiry_st +{ + unsigned int year; //!< Year of license expiry + unsigned short month; //!< Month of license expiry + unsigned short day; //!< Day of license expiry + unsigned short hour; //!< Hour of license expiry + unsigned short min; //!< Minutes of license expiry + unsigned short sec; //!< Seconds of license expiry + unsigned char status; //!< License expiry status +} nvmlVgpuLicenseExpiry_t; + +/** + * vGPU license state + */ +#define NVML_GRID_LICENSE_STATE_UNKNOWN 0 //!< Unknown state +#define NVML_GRID_LICENSE_STATE_UNINITIALIZED 1 //!< Uninitialized state +#define NVML_GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED 2 //!< Unlicensed unrestricted state +#define NVML_GRID_LICENSE_STATE_UNLICENSED_RESTRICTED 3 //!< Unlicensed restricted state +#define NVML_GRID_LICENSE_STATE_UNLICENSED 4 //!< Unlicensed state +#define NVML_GRID_LICENSE_STATE_LICENSED 5 //!< Licensed state + +typedef struct nvmlVgpuLicenseInfo_st +{ + unsigned char isLicensed; //!< License status + nvmlVgpuLicenseExpiry_t licenseExpiry; //!< License expiry information + unsigned int currentState; //!< Current license state +} nvmlVgpuLicenseInfo_t; + +/** + * Structure to store utilization value and process Id + */ +typedef struct nvmlProcessUtilizationSample_st +{ + unsigned int pid; //!< PID of process + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value +} nvmlProcessUtilizationSample_t; + +/** + * Structure to store utilization value and process Id -- version 1 + */ +typedef struct +{ + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int pid; //!< PID of process + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value + unsigned int jpgUtil; //!< Jpeg Util Value + unsigned int ofaUtil; //!< Ofa Util Value +} nvmlProcessUtilizationInfo_v1_t; + +/** + * Structure to store utilization and process ID for each running process -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int processSamplesCount; //!< Caller-supplied array size, and returns number of processes running + unsigned long long lastSeenTimeStamp; //!< Return only samples with timestamp greater than lastSeenTimeStamp + nvmlProcessUtilizationInfo_v1_t *procUtilArray; //!< The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA +} nvmlProcessesUtilizationInfo_v1_t; +typedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t; +#define nvmlProcessesUtilizationInfo_v1 NVML_STRUCT_VERSION(ProcessesUtilizationInfo, 1) + +/** + * Structure to store license expiry date and time values + */ +typedef struct nvmlGridLicenseExpiry_st +{ + unsigned 
int year; //!< Year value of license expiry + unsigned short month; //!< Month value of license expiry + unsigned short day; //!< Day value of license expiry + unsigned short hour; //!< Hour value of license expiry + unsigned short min; //!< Minutes value of license expiry + unsigned short sec; //!< Seconds value of license expiry + unsigned char status; //!< License expiry status +} nvmlGridLicenseExpiry_t; + +/** + * Structure containing vGPU software licensable feature information + */ +typedef struct nvmlGridLicensableFeature_st +{ + nvmlGridLicenseFeatureCode_t featureCode; //!< Licensed feature code + unsigned int featureState; //!< Non-zero if feature is currently licensed, otherwise zero + char licenseInfo[NVML_GRID_LICENSE_BUFFER_SIZE]; //!< Deprecated. + char productName[NVML_GRID_LICENSE_BUFFER_SIZE]; //!< Product name of feature + unsigned int featureEnabled; //!< Non-zero if feature is enabled, otherwise zero + nvmlGridLicenseExpiry_t licenseExpiry; //!< License expiry structure containing date and time +} nvmlGridLicensableFeature_t; + +/** + * Structure to store vGPU software licensable features + */ +typedef struct nvmlGridLicensableFeatures_st +{ + int isGridLicenseSupported; //!< Non-zero if vGPU Software Licensing is supported on the system, otherwise zero + unsigned int licensableFeaturesCount; //!< Entries returned in \a gridLicensableFeatures array + nvmlGridLicensableFeature_t gridLicensableFeatures[NVML_GRID_LICENSE_FEATURE_MAX_COUNT]; //!< Array of vGPU software licensable features. +} nvmlGridLicensableFeatures_t; + +/** + * Structure to store SRAM uncorrectable error counters + */ +typedef struct +{ + unsigned int version; //!< the API version number + unsigned long long aggregateUncParity; //!< aggregate uncorrectable parity error count + unsigned long long aggregateUncSecDed; //!< aggregate uncorrectable SEC-DED error count + unsigned long long aggregateCor; //!< aggregate correctable error count + unsigned long long volatileUncParity; //!< volatile uncorrectable parity error count + unsigned long long volatileUncSecDed; //!< volatile uncorrectable SEC-DED error count + unsigned long long volatileCor; //!< volatile correctable error count + unsigned long long aggregateUncBucketL2; //!< aggregate uncorrectable error count for L2 cache bucket + unsigned long long aggregateUncBucketSm; //!< aggregate uncorrectable error count for SM bucket + unsigned long long aggregateUncBucketPcie; //!< aggregate uncorrectable error count for PCIE bucket + unsigned long long aggregateUncBucketMcu; //!< aggregate uncorrectable error count for Microcontroller bucket + unsigned long long aggregateUncBucketOther; //!< aggregate uncorrectable error count for Other bucket + unsigned int bThresholdExceeded; //!< if the error threshold of field diag is exceeded +} nvmlEccSramErrorStatus_v1_t; + +typedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t; +#define nvmlEccSramErrorStatus_v1 NVML_STRUCT_VERSION(EccSramErrorStatus, 1) + +/** + * GSP firmware + */ +#define NVML_GSP_FIRMWARE_VERSION_BUF_SIZE 0x40 + +/** + * Simplified chip architecture + */ +#define NVML_DEVICE_ARCH_KEPLER 2 // Devices based on the NVIDIA Kepler architecture +#define NVML_DEVICE_ARCH_MAXWELL 3 // Devices based on the NVIDIA Maxwell architecture +#define NVML_DEVICE_ARCH_PASCAL 4 // Devices based on the NVIDIA Pascal architecture +#define NVML_DEVICE_ARCH_VOLTA 5 // Devices based on the NVIDIA Volta architecture +#define NVML_DEVICE_ARCH_TURING 6 // Devices based on the NVIDIA Turing architecture +#define 
NVML_DEVICE_ARCH_AMPERE 7 // Devices based on the NVIDIA Ampere architecture +#define NVML_DEVICE_ARCH_ADA 8 // Devices based on the NVIDIA Ada architecture +#define NVML_DEVICE_ARCH_HOPPER 9 // Devices based on the NVIDIA Hopper architecture + +#define NVML_DEVICE_ARCH_UNKNOWN 0xffffffff // Anything else, presumably something newer + +typedef unsigned int nvmlDeviceArchitecture_t; + +/** + * PCI bus types + */ +#define NVML_BUS_TYPE_UNKNOWN 0 +#define NVML_BUS_TYPE_PCI 1 +#define NVML_BUS_TYPE_PCIE 2 +#define NVML_BUS_TYPE_FPCI 3 +#define NVML_BUS_TYPE_AGP 4 + +typedef unsigned int nvmlBusType_t; + +/** + * Device Power Modes + */ + +/** + * Device Fan control policy + */ +#define NVML_FAN_POLICY_TEMPERATURE_CONTINOUS_SW 0 +#define NVML_FAN_POLICY_MANUAL 1 + +typedef unsigned int nvmlFanControlPolicy_t; + +/** + * Device Power Source + */ +#define NVML_POWER_SOURCE_AC 0x00000000 +#define NVML_POWER_SOURCE_BATTERY 0x00000001 +#define NVML_POWER_SOURCE_UNDERSIZED 0x00000002 + +typedef unsigned int nvmlPowerSource_t; + +/* + * Device PCIE link Max Speed + */ +#define NVML_PCIE_LINK_MAX_SPEED_INVALID 0x00000000 +#define NVML_PCIE_LINK_MAX_SPEED_2500MBPS 0x00000001 +#define NVML_PCIE_LINK_MAX_SPEED_5000MBPS 0x00000002 +#define NVML_PCIE_LINK_MAX_SPEED_8000MBPS 0x00000003 +#define NVML_PCIE_LINK_MAX_SPEED_16000MBPS 0x00000004 +#define NVML_PCIE_LINK_MAX_SPEED_32000MBPS 0x00000005 +#define NVML_PCIE_LINK_MAX_SPEED_64000MBPS 0x00000006 + +/* + * Adaptive clocking status + */ +#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED 0x00000000 +#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED 0x00000001 + +#define NVML_MAX_GPU_UTILIZATIONS 8 +typedef enum nvmlGpuUtilizationDomainId_t +{ + NVML_GPU_UTILIZATION_DOMAIN_GPU = 0, //!< Graphics engine domain + NVML_GPU_UTILIZATION_DOMAIN_FB = 1, //!< Frame buffer domain + NVML_GPU_UTILIZATION_DOMAIN_VID = 2, //!< Video engine domain + NVML_GPU_UTILIZATION_DOMAIN_BUS = 3, //!< Bus interface domain +} nvmlGpuUtilizationDomainId_t; + +typedef struct { + unsigned int bIsPresent; + unsigned int percentage; + unsigned int incThreshold; + unsigned int decThreshold; +} nvmlGpuDynamicPstatesInfoUtilization_t; + +typedef struct nvmlGpuDynamicPstatesInfo_st +{ + unsigned int flags; //!< Reserved for future use + nvmlGpuDynamicPstatesInfoUtilization_t utilization[NVML_MAX_GPU_UTILIZATIONS]; +} nvmlGpuDynamicPstatesInfo_t; + +/** @} */ +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlFieldValueEnums Field Value Enums + * @{ + */ +/***************************************************************************************************/ + +/** + * Field Identifiers. + * + * All Identifiers pertain to a device. Each ID is only used once and is guaranteed never to change. + */ +#define NVML_FI_DEV_ECC_CURRENT 1 //!< Current ECC mode. 1=Active. 0=Inactive +#define NVML_FI_DEV_ECC_PENDING 2 //!< Pending ECC mode. 1=Active. 
0=Inactive +/* ECC Count Totals */ +#define NVML_FI_DEV_ECC_SBE_VOL_TOTAL 3 //!< Total single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_TOTAL 4 //!< Total double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_TOTAL 5 //!< Total single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_TOTAL 6 //!< Total double bit aggregate (persistent) ECC errors +/* Individual ECC locations */ +#define NVML_FI_DEV_ECC_SBE_VOL_L1 7 //!< L1 cache single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_L1 8 //!< L1 cache double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_VOL_L2 9 //!< L2 cache single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_L2 10 //!< L2 cache double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_VOL_DEV 11 //!< Device memory single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_DEV 12 //!< Device memory double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_VOL_REG 13 //!< Register file single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_REG 14 //!< Register file double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_VOL_TEX 15 //!< Texture memory single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_TEX 16 //!< Texture memory double bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_CBU 17 //!< CBU double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_L1 18 //!< L1 cache single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_L1 19 //!< L1 cache double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_L2 20 //!< L2 cache single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_L2 21 //!< L2 cache double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_DEV 22 //!< Device memory single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_DEV 23 //!< Device memory double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_REG 24 //!< Register File single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_REG 25 //!< Register File double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_TEX 26 //!< Texture memory single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_TEX 27 //!< Texture memory double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_CBU 28 //!< CBU double bit aggregate ECC errors + +/* Page Retirement */ +#define NVML_FI_DEV_RETIRED_SBE 29 //!< Number of retired pages because of single bit errors +#define NVML_FI_DEV_RETIRED_DBE 30 //!< Number of retired pages because of double bit errors +#define NVML_FI_DEV_RETIRED_PENDING 31 //!< If any pages are pending retirement. 1=yes. 0=no. 
+ +/* NvLink Flit Error Counters */ +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 32 //!< NVLink flow control CRC Error Counter for Lane 0 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 33 //!< NVLink flow control CRC Error Counter for Lane 1 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 34 //!< NVLink flow control CRC Error Counter for Lane 2 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 35 //!< NVLink flow control CRC Error Counter for Lane 3 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 36 //!< NVLink flow control CRC Error Counter for Lane 4 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 37 //!< NVLink flow control CRC Error Counter for Lane 5 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL 38 //!< NVLink flow control CRC Error Counter total for all Lanes + +/* NvLink CRC Data Error Counters */ +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 39 //!< NVLink data CRC Error Counter for Lane 0 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 40 //!< NVLink data CRC Error Counter for Lane 1 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 41 //!< NVLink data CRC Error Counter for Lane 2 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 42 //!< NVLink data CRC Error Counter for Lane 3 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 43 //!< NVLink data CRC Error Counter for Lane 4 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 44 //!< NVLink data CRC Error Counter for Lane 5 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL 45 //!< NvLink data CRC Error Counter total for all Lanes + +/* NvLink Replay Error Counters */ +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 46 //!< NVLink Replay Error Counter for Lane 0 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 47 //!< NVLink Replay Error Counter for Lane 1 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 48 //!< NVLink Replay Error Counter for Lane 2 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 49 //!< NVLink Replay Error Counter for Lane 3 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 50 //!< NVLink Replay Error Counter for Lane 4 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 51 //!< NVLink Replay Error Counter for Lane 5 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL 52 //!< NVLink Replay Error Counter total for all Lanes + +/* NvLink Recovery Error Counters */ +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 53 //!< NVLink Recovery Error Counter for Lane 0 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 54 //!< NVLink Recovery Error Counter for Lane 1 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 55 //!< NVLink Recovery Error Counter for Lane 2 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 56 //!< NVLink Recovery Error Counter for Lane 3 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 57 //!< NVLink Recovery Error Counter for Lane 4 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 58 //!< NVLink Recovery Error Counter for Lane 5 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL 59 //!< NVLink Recovery Error Counter total for all Lanes + +/* NvLink Bandwidth Counters */ +/* + * NVML_FI_DEV_NVLINK_BANDWIDTH_* field values are now deprecated. 
+ * Please use the following field values instead: + * NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX + * NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX + * NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_TX + * NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_RX + */ +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L0 60 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 0 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L1 61 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 1 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L2 62 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 2 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L3 63 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 3 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L4 64 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 4 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L5 65 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 5 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL 66 //!< NVLink Bandwidth Counter Total for Counter Set 0, All Lanes + +/* NvLink Bandwidth Counters */ +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L0 67 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 0 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L1 68 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 1 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L2 69 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 2 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L3 70 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 3 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L4 71 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 4 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L5 72 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 5 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL 73 //!< NVLink Bandwidth Counter Total for Counter Set 1, All Lanes + +/* NVML Perf Policy Counters */ +#define NVML_FI_DEV_PERF_POLICY_POWER 74 //!< Perf Policy Counter for Power Policy +#define NVML_FI_DEV_PERF_POLICY_THERMAL 75 //!< Perf Policy Counter for Thermal Policy +#define NVML_FI_DEV_PERF_POLICY_SYNC_BOOST 76 //!< Perf Policy Counter for Sync boost Policy +#define NVML_FI_DEV_PERF_POLICY_BOARD_LIMIT 77 //!< Perf Policy Counter for Board Limit +#define NVML_FI_DEV_PERF_POLICY_LOW_UTILIZATION 78 //!< Perf Policy Counter for Low GPU Utilization Policy +#define NVML_FI_DEV_PERF_POLICY_RELIABILITY 79 //!< Perf Policy Counter for Reliability Policy +#define NVML_FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS 80 //!< Perf Policy Counter for Total App Clock Policy +#define NVML_FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS 81 //!< Perf Policy Counter for Total Base Clocks Policy + +/* Memory temperatures */ +#define NVML_FI_DEV_MEMORY_TEMP 82 //!< Memory temperature for the device + +/* Energy Counter */ +#define NVML_FI_DEV_TOTAL_ENERGY_CONSUMPTION 83 //!< Total energy consumption for the GPU in mJ since the driver was last reloaded + +/* NVLink Speed */ +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L0 84 //!< NVLink Speed in MBps for Link 0 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L1 85 //!< NVLink Speed in MBps for Link 1 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L2 86 //!< NVLink Speed in MBps for Link 2 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L3 87 //!< NVLink Speed in MBps for Link 3 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L4 88 //!< NVLink Speed in MBps for Link 4 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L5 89 //!< NVLink Speed in MBps for Link 5 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_COMMON 90 //!< Common NVLink Speed in MBps for active links + +#define NVML_FI_DEV_NVLINK_LINK_COUNT 91 //!< Number of NVLinks present on the device + +#define NVML_FI_DEV_RETIRED_PENDING_SBE 92 //!< If any 
pages are pending retirement due to SBE. 1=yes. 0=no. +#define NVML_FI_DEV_RETIRED_PENDING_DBE 93 //!< If any pages are pending retirement due to DBE. 1=yes. 0=no. + +#define NVML_FI_DEV_PCIE_REPLAY_COUNTER 94 //!< PCIe replay counter +#define NVML_FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER 95 //!< PCIe replay rollover counter + +/* NvLink Flit Error Counters */ +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 96 //!< NVLink flow control CRC Error Counter for Lane 6 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 97 //!< NVLink flow control CRC Error Counter for Lane 7 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 98 //!< NVLink flow control CRC Error Counter for Lane 8 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 99 //!< NVLink flow control CRC Error Counter for Lane 9 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 100 //!< NVLink flow control CRC Error Counter for Lane 10 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 101 //!< NVLink flow control CRC Error Counter for Lane 11 + +/* NvLink CRC Data Error Counters */ +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 102 //!< NVLink data CRC Error Counter for Lane 6 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 103 //!< NVLink data CRC Error Counter for Lane 7 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 104 //!< NVLink data CRC Error Counter for Lane 8 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 105 //!< NVLink data CRC Error Counter for Lane 9 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 106 //!< NVLink data CRC Error Counter for Lane 10 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 107 //!< NVLink data CRC Error Counter for Lane 11 + +/* NvLink Replay Error Counters */ +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 108 //!< NVLink Replay Error Counter for Lane 6 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 109 //!< NVLink Replay Error Counter for Lane 7 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 110 //!< NVLink Replay Error Counter for Lane 8 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 111 //!< NVLink Replay Error Counter for Lane 9 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 112 //!< NVLink Replay Error Counter for Lane 10 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 113 //!< NVLink Replay Error Counter for Lane 11 + +/* NvLink Recovery Error Counters */ +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 114 //!< NVLink Recovery Error Counter for Lane 6 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 115 //!< NVLink Recovery Error Counter for Lane 7 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 116 //!< NVLink Recovery Error Counter for Lane 8 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 117 //!< NVLink Recovery Error Counter for Lane 9 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 118 //!< NVLink Recovery Error Counter for Lane 10 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 119 //!< NVLink Recovery Error Counter for Lane 11 + +/* NvLink Bandwidth Counters */ +/* + * NVML_FI_DEV_NVLINK_BANDWIDTH_* field values are now deprecated. 
+ * Please use the following field values instead: + * NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX + * NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX + * NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_TX + * NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_RX + */ +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L6 120 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 6 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L7 121 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 7 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L8 122 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 8 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L9 123 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 9 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L10 124 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 10 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L11 125 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 11 + +/* NvLink Bandwidth Counters */ +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L6 126 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 6 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L7 127 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 7 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L8 128 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 8 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L9 129 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 9 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L10 130 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 10 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L11 131 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 11 + +/* NVLink Speed */ +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L6 132 //!< NVLink Speed in MBps for Link 6 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L7 133 //!< NVLink Speed in MBps for Link 7 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L8 134 //!< NVLink Speed in MBps for Link 8 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L9 135 //!< NVLink Speed in MBps for Link 9 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L10 136 //!< NVLink Speed in MBps for Link 10 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L11 137 //!< NVLink Speed in MBps for Link 11 + +/** + * NVLink throughput counters field values + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + * A scopeId of UINT_MAX returns aggregate value summed up across all links + * for the specified counter type in fieldId. + */ +#define NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX 138 //!< NVLink TX Data throughput in KiB +#define NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX 139 //!< NVLink RX Data throughput in KiB +#define NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_TX 140 //!< NVLink TX Data + protocol overhead in KiB +#define NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_RX 141 //!< NVLink RX Data + protocol overhead in KiB + +/* Row Remapper */ +#define NVML_FI_DEV_REMAPPED_COR 142 //!< Number of remapped rows due to correctable errors +#define NVML_FI_DEV_REMAPPED_UNC 143 //!< Number of remapped rows due to uncorrectable errors +#define NVML_FI_DEV_REMAPPED_PENDING 144 //!< If any rows are pending remapping. 1=yes 0=no +#define NVML_FI_DEV_REMAPPED_FAILURE 145 //!< If any rows failed to be remapped 1=yes 0=no + +/** + * Remote device NVLink ID + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ +#define NVML_FI_DEV_NVLINK_REMOTE_NVLINK_ID 146 //!< Remote device NVLink ID + +/** + * NVSwitch: connected NVLink count + */ +#define NVML_FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT 147 //!< Number of NVLinks connected to NVSwitch + +/* NvLink ECC Data Error Counters + * + * Lane ID needs to be specified in the scopeId field in nvmlFieldValue_t. 
+ * + */ +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 148 //!< NVLink data ECC Error Counter for Link 0 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 149 //!< NVLink data ECC Error Counter for Link 1 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 150 //!< NVLink data ECC Error Counter for Link 2 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 151 //!< NVLink data ECC Error Counter for Link 3 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 152 //!< NVLink data ECC Error Counter for Link 4 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 153 //!< NVLink data ECC Error Counter for Link 5 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 154 //!< NVLink data ECC Error Counter for Link 6 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 155 //!< NVLink data ECC Error Counter for Link 7 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 156 //!< NVLink data ECC Error Counter for Link 8 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 157 //!< NVLink data ECC Error Counter for Link 9 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 158 //!< NVLink data ECC Error Counter for Link 10 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 159 //!< NVLink data ECC Error Counter for Link 11 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL 160 //!< NVLink data ECC Error Counter total for all Links + +#define NVML_FI_DEV_NVLINK_ERROR_DL_REPLAY 161 //!< NVLink Replay Error Counter +#define NVML_FI_DEV_NVLINK_ERROR_DL_RECOVERY 162 //!< NVLink Recovery Error Counter +#define NVML_FI_DEV_NVLINK_ERROR_DL_CRC 163 //!< NVLink CRC Error Counter +#define NVML_FI_DEV_NVLINK_GET_SPEED 164 //!< NVLink Speed in MBps +#define NVML_FI_DEV_NVLINK_GET_STATE 165 //!< NVLink State - Active,Inactive +#define NVML_FI_DEV_NVLINK_GET_VERSION 166 //!< NVLink Version + +#define NVML_FI_DEV_NVLINK_GET_POWER_STATE 167 //!< NVLink Power state. 0=HIGH_SPEED 1=LOW_SPEED +#define NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD 168 //!< NVLink length of idle period (in units of 100us) before transitioning links to sleep state + +#define NVML_FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER 169 //!< Device PEX error recovery counter + +#define NVML_FI_DEV_C2C_LINK_COUNT 170 //!< Number of C2C Links present on the device +#define NVML_FI_DEV_C2C_LINK_GET_STATUS 171 //!< C2C Link Status 0=INACTIVE 1=ACTIVE +#define NVML_FI_DEV_C2C_LINK_GET_MAX_BW 172 //!< C2C Link Speed in MBps for active links + +#define NVML_FI_DEV_PCIE_COUNT_CORRECTABLE_ERRORS 173 +#define NVML_FI_DEV_PCIE_COUNT_NAKS_RECEIVED 174 +#define NVML_FI_DEV_PCIE_COUNT_RECEIVER_ERROR 175 +#define NVML_FI_DEV_PCIE_COUNT_BAD_TLP 176 +#define NVML_FI_DEV_PCIE_COUNT_NAKS_SENT 177 +#define NVML_FI_DEV_PCIE_COUNT_BAD_DLLP 178 +#define NVML_FI_DEV_PCIE_COUNT_NON_FATAL_ERROR 179 +#define NVML_FI_DEV_PCIE_COUNT_FATAL_ERROR 180 +#define NVML_FI_DEV_PCIE_COUNT_UNSUPPORTED_REQ 181 +#define NVML_FI_DEV_PCIE_COUNT_LCRC_ERROR 182 +#define NVML_FI_DEV_PCIE_COUNT_LANE_ERROR 183 + +#define NVML_FI_DEV_IS_RESETLESS_MIG_SUPPORTED 184 + +/** + * Retrieves power usage for this GPU in milliwatts. + * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode and + * \ref nvmlDeviceGetPowerUsage. + * + * scopeId needs to be specified. It signifies: + * 0 - GPU Only Scope - Metrics for GPU are retrieved + * 1 - Module scope - Metrics for the module (e.g. CPU + GPU) are retrieved. + * Note: CPU here refers to NVIDIA CPU (e.g. Grace). 
x86 or non-NVIDIA ARM is not supported + */ +#define NVML_FI_DEV_POWER_AVERAGE 185 //!< GPU power averaged over 1 sec interval, supported on Ampere (except GA100) or newer architectures. +#define NVML_FI_DEV_POWER_INSTANT 186 //!< Current GPU power, supported on all architectures. +#define NVML_FI_DEV_POWER_MIN_LIMIT 187 //!< Minimum power limit in milliwatts. +#define NVML_FI_DEV_POWER_MAX_LIMIT 188 //!< Maximum power limit in milliwatts. +#define NVML_FI_DEV_POWER_DEFAULT_LIMIT 189 //!< Default power limit in milliwatts (limit which device boots with). +#define NVML_FI_DEV_POWER_CURRENT_LIMIT 190 //!< Limit currently enforced in milliwatts (This includes other limits set elsewhere. E.g. Out-of-band). +#define NVML_FI_DEV_ENERGY 191 //!< Total energy consumption (in mJ) since the driver was last reloaded. Same as \ref NVML_FI_DEV_TOTAL_ENERGY_CONSUMPTION for the GPU. +#define NVML_FI_DEV_POWER_REQUESTED_LIMIT 192 //!< Power limit requested by NVML or any other userspace client. + +/** + * GPU T.Limit temperature thresholds in degree Celsius + * + * These fields are supported on Ada and later architectures and supersedes \ref nvmlDeviceGetTemperatureThreshold. + */ +#define NVML_FI_DEV_TEMPERATURE_SHUTDOWN_TLIMIT 193 //!< T.Limit temperature after which GPU may shut down for HW protection +#define NVML_FI_DEV_TEMPERATURE_SLOWDOWN_TLIMIT 194 //!< T.Limit temperature after which GPU may begin HW slowdown +#define NVML_FI_DEV_TEMPERATURE_MEM_MAX_TLIMIT 195 //!< T.Limit temperature after which GPU may begin SW slowdown due to memory temperature +#define NVML_FI_DEV_TEMPERATURE_GPU_MAX_TLIMIT 196 //!< T.Limit temperature after which GPU may be throttled below base clock + +#define NVML_FI_DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE 199 //!< MIG mode independent, MIG query capable device. 1=yes. 0=no. + +#define NVML_FI_MAX 200 //!< One greater than the largest field ID defined above + +/** + * Information for a Field Value Sample + */ +typedef struct nvmlFieldValue_st +{ + unsigned int fieldId; //!< ID of the NVML field to retrieve. This must be set before any call that uses this struct. See the constants starting with NVML_FI_ above. + unsigned int scopeId; //!< Scope ID can represent data used by NVML depending on fieldId's context. For example, for NVLink throughput counter data, scopeId can represent linkId. + long long timestamp; //!< CPU Timestamp of this value in microseconds since 1970 + long long latencyUsec; //!< How long this field value took to update (in usec) within NVML. This may be averaged across several fields that are serviced by the same driver call. + nvmlValueType_t valueType; //!< Type of the value stored in value + nvmlReturn_t nvmlReturn; //!< Return code for retrieving this value. This must be checked before looking at value, as value is undefined if nvmlReturn != NVML_SUCCESS + nvmlValue_t value; //!< Value for this field. This is only valid if nvmlReturn == NVML_SUCCESS +} nvmlFieldValue_t; + + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlUnitStructs Unit Structs + * @{ + */ +/***************************************************************************************************/ + +typedef struct +{ + struct nvmlUnit_st* handle; +} nvmlUnit_t; + +/** + * Description of HWBC entry + */ +typedef struct nvmlHwbcEntry_st +{ + unsigned int hwbcId; + char firmwareVersion[32]; +} nvmlHwbcEntry_t; + +/** + * Fan state enum. 
+ */ +typedef enum nvmlFanState_enum +{ + NVML_FAN_NORMAL = 0, //!< Fan is working properly + NVML_FAN_FAILED = 1 //!< Fan has failed +} nvmlFanState_t; + +/** + * Led color enum. + */ +typedef enum nvmlLedColor_enum +{ + NVML_LED_COLOR_GREEN = 0, //!< GREEN, indicates good health + NVML_LED_COLOR_AMBER = 1 //!< AMBER, indicates problem +} nvmlLedColor_t; + + +/** + * LED states for an S-class unit. + */ +typedef struct nvmlLedState_st +{ + char cause[256]; //!< If amber, a text description of the cause + nvmlLedColor_t color; //!< GREEN or AMBER +} nvmlLedState_t; + +/** + * Static S-class unit info. + */ +typedef struct nvmlUnitInfo_st +{ + char name[96]; //!< Product name + char id[96]; //!< Product identifier + char serial[96]; //!< Product serial number + char firmwareVersion[96]; //!< Firmware version +} nvmlUnitInfo_t; + +/** + * Power usage information for an S-class unit. + * The power supply state is a human readable string that equals "Normal" or contains + * a combination of "Abnormal" plus one or more of the following: + * + * - High voltage + * - Fan failure + * - Heatsink temperature + * - Current limit + * - Voltage below UV alarm threshold + * - Low-voltage + * - SI2C remote off command + * - MOD_DISABLE input + * - Short pin transition +*/ +typedef struct nvmlPSUInfo_st +{ + char state[256]; //!< The power supply state + unsigned int current; //!< PSU current (A) + unsigned int voltage; //!< PSU voltage (V) + unsigned int power; //!< PSU power draw (W) +} nvmlPSUInfo_t; + +/** + * Fan speed reading for a single fan in an S-class unit. + */ +typedef struct nvmlUnitFanInfo_st +{ + unsigned int speed; //!< Fan speed (RPM) + nvmlFanState_t state; //!< Flag that indicates whether fan is working properly +} nvmlUnitFanInfo_t; + +/** + * Fan speed readings for an entire S-class unit. + */ +typedef struct nvmlUnitFanSpeeds_st +{ + nvmlUnitFanInfo_t fans[24]; //!< Fan speed data for each fan + unsigned int count; //!< Number of fans in unit +} nvmlUnitFanSpeeds_t; + +/** @} */ + +/***************************************************************************************************/ +/** @addtogroup nvmlEvents + * @{ + */ +/***************************************************************************************************/ + +/** + * Handle to an event set + */ +typedef struct +{ + struct nvmlEventSet_st* handle; +} nvmlEventSet_t; + +/** @defgroup nvmlEventType Event Types + * @{ + * Event Types which user can be notified about. + * See description of particular functions for details. + * + * See \ref nvmlDeviceRegisterEvents and \ref nvmlDeviceGetSupportedEventTypes to check which devices + * support each event. + * + * Types can be combined with bitwise or operator '|' when passed to \ref nvmlDeviceRegisterEvents + */ +//! Event about single bit ECC errors +/** + * \note A corrected texture memory error is not an ECC error, so it does not generate a single bit event + */ +#define nvmlEventTypeSingleBitEccError 0x0000000000000001LL + +//! Event about double bit ECC errors +/** + * \note An uncorrected texture memory error is not an ECC error, so it does not generate a double bit event + */ +#define nvmlEventTypeDoubleBitEccError 0x0000000000000002LL + +//! Event about PState changes +/** + * \note On Fermi architecture PState changes are also an indicator that GPU is throttling down due to + * no work being executed on the GPU, power capping or thermal capping. In a typical situation, + * Fermi-based GPU should stay in P0 for the duration of the execution of the compute process. 
+ */ +#define nvmlEventTypePState 0x0000000000000004LL + +//! Event that Xid critical error occurred +#define nvmlEventTypeXidCriticalError 0x0000000000000008LL + +//! Event about clock changes +/** + * Kepler only + */ +#define nvmlEventTypeClock 0x0000000000000010LL + +//! Event about AC/Battery power source changes +#define nvmlEventTypePowerSourceChange 0x0000000000000080LL + +//! Event about MIG configuration changes +#define nvmlEventMigConfigChange 0x0000000000000100LL + +//! Mask with no events +#define nvmlEventTypeNone 0x0000000000000000LL + +//! Mask of all events +#define nvmlEventTypeAll (nvmlEventTypeNone \ + | nvmlEventTypeSingleBitEccError \ + | nvmlEventTypeDoubleBitEccError \ + | nvmlEventTypePState \ + | nvmlEventTypeClock \ + | nvmlEventTypeXidCriticalError \ + | nvmlEventTypePowerSourceChange \ + | nvmlEventMigConfigChange \ + ) +/** @} */ + +/** + * Information about occurred event + */ +typedef struct nvmlEventData_st +{ + nvmlDevice_t device; //!< Specific device where the event occurred + unsigned long long eventType; //!< Information about what specific event occurred + unsigned long long eventData; //!< Stores XID error for the device in the event of nvmlEventTypeXidCriticalError, + // eventData is 0 for any other event. eventData is set as 999 for unknown xid error. + unsigned int gpuInstanceId; //!< If MIG is enabled and nvmlEventTypeXidCriticalError event is attributable to a GPU + // instance, stores a valid GPU instance ID. gpuInstanceId is set to 0xFFFFFFFF + // otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled and nvmlEventTypeXidCriticalError event is attributable to a + // compute instance, stores a valid compute instance ID. computeInstanceId is set to + // 0xFFFFFFFF otherwise. +} nvmlEventData_t; + +/** @} */ + +/***************************************************************************************************/ +/** @addtogroup nvmlClocksEventReasons + * @{ + */ +/***************************************************************************************************/ + +/** Nothing is running on the GPU and the clocks are dropping to Idle state + * \note This limiter may be removed in a later release + */ +#define nvmlClocksEventReasonGpuIdle 0x0000000000000001LL + +/** GPU clocks are limited by current setting of applications clocks + * + * @see nvmlDeviceSetApplicationsClocks + * @see nvmlDeviceGetApplicationsClock + */ +#define nvmlClocksEventReasonApplicationsClocksSetting 0x0000000000000002LL + +/** + * @deprecated Renamed to \ref nvmlClocksThrottleReasonApplicationsClocksSetting + * as the name describes the situation more accurately. + */ +#define nvmlClocksThrottleReasonUserDefinedClocks nvmlClocksEventReasonApplicationsClocksSetting + +/** The clocks have been optimized to ensure not to exceed currently set power limits + * + * @see nvmlDeviceGetPowerUsage + * @see nvmlDeviceSetPowerManagementLimit + * @see nvmlDeviceGetPowerManagementLimit + */ +#define nvmlClocksEventReasonSwPowerCap 0x0000000000000004LL + +/** HW Slowdown (reducing the core clocks by a factor of 2 or more) is engaged + * + * This is an indicator of: + * - temperature being too high + * - External Power Brake Assertion is triggered (e.g. by the system power supply) + * - Power draw is too high and Fast Trigger protection is reducing the clocks + * - May be also reported during PState or clock change + * - This behavior may be removed in a later release. 
+ * + * @see nvmlDeviceGetTemperature + * @see nvmlDeviceGetTemperatureThreshold + * @see nvmlDeviceGetPowerUsage + */ +#define nvmlClocksThrottleReasonHwSlowdown 0x0000000000000008LL + +/** Sync Boost + * + * This GPU has been added to a Sync boost group with nvidia-smi or DCGM in + * order to maximize performance per watt. All GPUs in the sync boost group + * will boost to the minimum possible clocks across the entire group. Look at + * the throttle reasons for other GPUs in the system to see why those GPUs are + * holding this one at lower clocks. + * + */ +#define nvmlClocksEventReasonSyncBoost 0x0000000000000010LL + +/** SW Thermal Slowdown + * + * The current clocks have been optimized to ensure the following is true: + * - Current GPU temperature does not exceed GPU Max Operating Temperature + * - Current memory temperature does not exceed Memory Max Operating Temperature + * + */ +#define nvmlClocksEventReasonSwThermalSlowdown 0x0000000000000020LL + +/** HW Thermal Slowdown (reducing the core clocks by a factor of 2 or more) is engaged + * + * This is an indicator of: + * - temperature being too high + * + * @see nvmlDeviceGetTemperature + * @see nvmlDeviceGetTemperatureThreshold + * @see nvmlDeviceGetPowerUsage + */ +#define nvmlClocksThrottleReasonHwThermalSlowdown 0x0000000000000040LL + +/** HW Power Brake Slowdown (reducing the core clocks by a factor of 2 or more) is engaged + * + * This is an indicator of: + * - External Power Brake Assertion being triggered (e.g. by the system power supply) + * + * @see nvmlDeviceGetTemperature + * @see nvmlDeviceGetTemperatureThreshold + * @see nvmlDeviceGetPowerUsage + */ +#define nvmlClocksThrottleReasonHwPowerBrakeSlowdown 0x0000000000000080LL + +/** GPU clocks are limited by current setting of Display clocks + * + * @see bug 1997531 + */ +#define nvmlClocksEventReasonDisplayClockSetting 0x0000000000000100LL + +/** Bit mask representing no clocks throttling + * + * Clocks are as high as possible.
+ * */ +#define nvmlClocksEventReasonNone 0x0000000000000000LL + +/** Bit mask representing all supported clocks throttling reasons + * New reasons might be added to this list in the future + */ +#define nvmlClocksEventReasonAll (nvmlClocksThrottleReasonNone \ + | nvmlClocksEventReasonGpuIdle \ + | nvmlClocksEventReasonApplicationsClocksSetting \ + | nvmlClocksEventReasonSwPowerCap \ + | nvmlClocksThrottleReasonHwSlowdown \ + | nvmlClocksEventReasonSyncBoost \ + | nvmlClocksEventReasonSwThermalSlowdown \ + | nvmlClocksThrottleReasonHwThermalSlowdown \ + | nvmlClocksThrottleReasonHwPowerBrakeSlowdown \ + | nvmlClocksEventReasonDisplayClockSetting \ +) + +/** + * @deprecated Use \ref nvmlClocksEventReasonGpuIdle instead + */ +#define nvmlClocksThrottleReasonGpuIdle nvmlClocksEventReasonGpuIdle +/** + * @deprecated Use \ref nvmlClocksEventReasonApplicationsClocksSetting instead + */ +#define nvmlClocksThrottleReasonApplicationsClocksSetting nvmlClocksEventReasonApplicationsClocksSetting +/** + * @deprecated Use \ref nvmlClocksEventReasonSyncBoost instead + */ +#define nvmlClocksThrottleReasonSyncBoost nvmlClocksEventReasonSyncBoost +/** + * @deprecated Use \ref nvmlClocksEventReasonSwPowerCap instead + */ +#define nvmlClocksThrottleReasonSwPowerCap nvmlClocksEventReasonSwPowerCap +/** + * @deprecated Use \ref nvmlClocksEventReasonSwThermalSlowdown instead + */ +#define nvmlClocksThrottleReasonSwThermalSlowdown nvmlClocksEventReasonSwThermalSlowdown +/** + * @deprecated Use \ref nvmlClocksEventReasonDisplayClockSetting instead + */ +#define nvmlClocksThrottleReasonDisplayClockSetting nvmlClocksEventReasonDisplayClockSetting +/** + * @deprecated Use \ref nvmlClocksEventReasonNone instead + */ +#define nvmlClocksThrottleReasonNone nvmlClocksEventReasonNone +/** + * @deprecated Use \ref nvmlClocksEventReasonAll instead + */ +#define nvmlClocksThrottleReasonAll nvmlClocksEventReasonAll +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlAccountingStats Accounting Statistics + * @{ + * + * Set of APIs designed to provide per process information about usage of GPU. + * + * @note All accounting statistics and accounting mode live in nvidia driver and reset + * to default (Disabled) when driver unloads. + * It is advised to run with persistence mode enabled. + * + * @note Enabling accounting mode has no negative impact on the GPU performance. + */ +/***************************************************************************************************/ + +/** + * Describes accounting statistics of a process. + */ +typedef struct nvmlAccountingStats_st { + unsigned int gpuUtilization; //!< Percent of time over the process's lifetime during which one or more kernels was executing on the GPU. + //! Utilization stats just like returned by \ref nvmlDeviceGetUtilizationRates but for the life time of a + //! process (not just the last sample period). + //! Set to NVML_VALUE_NOT_AVAILABLE if nvmlDeviceGetUtilizationRates is not supported + + unsigned int memoryUtilization; //!< Percent of time over the process's lifetime during which global (device) memory was being read or written. + //! Set to NVML_VALUE_NOT_AVAILABLE if nvmlDeviceGetUtilizationRates is not supported + + unsigned long long maxMemoryUsage; //!< Maximum total memory in bytes that was ever allocated by the process. + //! 
Set to NVML_VALUE_NOT_AVAILABLE if nvmlProcessInfo_t->usedGpuMemory is not supported + + + unsigned long long time; //!< Amount of time in ms during which the compute context was active. The time is reported as 0 if + //!< the process is not terminated + + unsigned long long startTime; //!< CPU Timestamp in usec representing start time for the process + + unsigned int isRunning; //!< Flag to represent if the process is running (1 for running, 0 for terminated) + + unsigned int reserved[5]; //!< Reserved for future use +} nvmlAccountingStats_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlEncoderStructs Encoder Structs + * @{ + */ +/***************************************************************************************************/ + +/** + * Represents type of encoder for capacity can be queried + */ +typedef enum nvmlEncoderQueryType_enum +{ + NVML_ENCODER_QUERY_H264 = 0x00, //!< H264 encoder + NVML_ENCODER_QUERY_HEVC = 0x01, //!< HEVC encoder + NVML_ENCODER_QUERY_AV1 = 0x02, //!< AV1 encoder + NVML_ENCODER_QUERY_UNKNOWN = 0xFF //!< Unknown encoder +}nvmlEncoderType_t; + +/** + * Structure to hold encoder session data + */ +typedef struct nvmlEncoderSessionInfo_st +{ + unsigned int sessionId; //!< Unique session ID + unsigned int pid; //!< Owning process ID + nvmlVgpuInstance_t vgpuInstance; //!< Owning vGPU instance ID (only valid on vGPU hosts, otherwise zero) + nvmlEncoderType_t codecType; //!< Video encoder type + unsigned int hResolution; //!< Current encode horizontal resolution + unsigned int vResolution; //!< Current encode vertical resolution + unsigned int averageFps; //!< Moving average encode frames per second + unsigned int averageLatency; //!< Moving average encode latency in microseconds +}nvmlEncoderSessionInfo_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlFBCStructs Frame Buffer Capture Structures +* @{ +*/ +/***************************************************************************************************/ + +/** + * Represents frame buffer capture session type + */ +typedef enum nvmlFBCSessionType_enum +{ + NVML_FBC_SESSION_TYPE_UNKNOWN = 0, //!< Unknown + NVML_FBC_SESSION_TYPE_TOSYS, //!< ToSys + NVML_FBC_SESSION_TYPE_CUDA, //!< Cuda + NVML_FBC_SESSION_TYPE_VID, //!< Vid + NVML_FBC_SESSION_TYPE_HWENC //!< HEnc +} nvmlFBCSessionType_t; + +/** + * Structure to hold frame buffer capture sessions stats + */ +typedef struct nvmlFBCStats_st +{ + unsigned int sessionsCount; //!< Total no of sessions + unsigned int averageFPS; //!< Moving average new frames captured per second + unsigned int averageLatency; //!< Moving average new frame capture latency in microseconds +} nvmlFBCStats_t; + +#define NVML_NVFBC_SESSION_FLAG_DIFFMAP_ENABLED 0x00000001 //!< Bit specifying differential map state. +#define NVML_NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED 0x00000002 //!< Bit specifying classification map state. +#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT 0x00000004 //!< Bit specifying if capture was requested as non-blocking call. +#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE 0x00000008 //!< Bit specifying if capture was requested as blocking call. +#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT 0x00000010 //!< Bit specifying if capture was requested as blocking call with timeout period. 
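[Illustrative note, not part of the vendored header.] The event-type masks and the nvmlEventData_t record defined earlier in this section are meant to be used together; the minimal sketch below waits for an Xid error on a device and reads the fields described above. The event-set entry points it calls (nvmlEventSetCreate, nvmlDeviceRegisterEvents, nvmlEventSetWait_v2, nvmlEventSetFree) are declared elsewhere in this header, not in the excerpt above, so treat their use here as an assumption; the device handle is assumed to come from the device queries later in the file.

    #include <stdio.h>
    #include "nvml.h"

    /* Sketch: wait up to 10 seconds for an Xid critical error on `device`. */
    static void watch_for_xid(nvmlDevice_t device)
    {
        nvmlEventSet_t set;
        nvmlEventData_t data;
        /* Masks compose with bitwise OR; nvmlEventTypeAll covers every event. */
        unsigned long long mask = nvmlEventTypeXidCriticalError;

        if (nvmlEventSetCreate(&set) != NVML_SUCCESS)
            return;
        if (nvmlDeviceRegisterEvents(device, mask, set) == NVML_SUCCESS &&
            nvmlEventSetWait_v2(set, &data, 10000) == NVML_SUCCESS &&
            data.eventType == nvmlEventTypeXidCriticalError) {
            /* eventData carries the Xid code; 999 marks an unknown Xid. */
            printf("Xid %llu (GPU instance %u, compute instance %u)\n",
                   data.eventData, data.gpuInstanceId, data.computeInstanceId);
        }
        nvmlEventSetFree(set);
    }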
+ +/** + * Structure to hold FBC session data + */ +typedef struct nvmlFBCSessionInfo_st +{ + unsigned int sessionId; //!< Unique session ID + unsigned int pid; //!< Owning process ID + nvmlVgpuInstance_t vgpuInstance; //!< Owning vGPU instance ID (only valid on vGPU hosts, otherwise zero) + unsigned int displayOrdinal; //!< Display identifier + nvmlFBCSessionType_t sessionType; //!< Type of frame buffer capture session + unsigned int sessionFlags; //!< Session flags (one or more of NVML_NVFBC_SESSION_FLAG_XXX). + unsigned int hMaxResolution; //!< Max horizontal resolution supported by the capture session + unsigned int vMaxResolution; //!< Max vertical resolution supported by the capture session + unsigned int hResolution; //!< Horizontal resolution requested by caller in capture call + unsigned int vResolution; //!< Vertical resolution requested by caller in capture call + unsigned int averageFPS; //!< Moving average new frames captured per second + unsigned int averageLatency; //!< Moving average new frame capture latency in microseconds +} nvmlFBCSessionInfo_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDrainDefs definitions related to the drain state + * @{ + */ +/***************************************************************************************************/ + +/** + * Is the GPU device to be removed from the kernel by nvmlDeviceRemoveGpu() + */ +typedef enum nvmlDetachGpuState_enum +{ + NVML_DETACH_GPU_KEEP = 0, + NVML_DETACH_GPU_REMOVE +} nvmlDetachGpuState_t; + +/** + * Parent bridge PCIe link state requested by nvmlDeviceRemoveGpu() + */ +typedef enum nvmlPcieLinkState_enum +{ + NVML_PCIE_LINK_KEEP = 0, + NVML_PCIE_LINK_SHUT_DOWN +} nvmlPcieLinkState_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlSystem/nvmlDevice definitions related to Confidential Computing + * @{ + */ +/***************************************************************************************************/ +/** + * Confidential Compute CPU Capabilities values + */ +#define NVML_CC_SYSTEM_CPU_CAPS_NONE 0 +#define NVML_CC_SYSTEM_CPU_CAPS_AMD_SEV 1 +#define NVML_CC_SYSTEM_CPU_CAPS_INTEL_TDX 2 + +/** + * Confidenial Compute GPU Capabilities values + */ +#define NVML_CC_SYSTEM_GPUS_CC_NOT_CAPABLE 0 +#define NVML_CC_SYSTEM_GPUS_CC_CAPABLE 1 + +typedef struct nvmlConfComputeSystemCaps_st { + unsigned int cpuCaps; + unsigned int gpusCaps; +} nvmlConfComputeSystemCaps_t; + +/** + * Confidential Compute DevTools Mode values + */ +#define NVML_CC_SYSTEM_DEVTOOLS_MODE_OFF 0 +#define NVML_CC_SYSTEM_DEVTOOLS_MODE_ON 1 + +/** + * Confidential Compute Environment values + */ +#define NVML_CC_SYSTEM_ENVIRONMENT_UNAVAILABLE 0 +#define NVML_CC_SYSTEM_ENVIRONMENT_SIM 1 +#define NVML_CC_SYSTEM_ENVIRONMENT_PROD 2 + +/** + * Confidential Compute Feature Status values + */ +#define NVML_CC_SYSTEM_FEATURE_DISABLED 0 +#define NVML_CC_SYSTEM_FEATURE_ENABLED 1 + +typedef struct nvmlConfComputeSystemState_st { + unsigned int environment; + unsigned int ccFeature; + unsigned int devToolsMode; +} nvmlConfComputeSystemState_t; + +/** + * Confidential Compute Multigpu mode values + */ +#define NVML_CC_SYSTEM_MULTIGPU_NONE 0 +#define NVML_CC_SYSTEM_MULTIGPU_PROTECTED_PCIE 1 + +/** + * Confidential Compute System settings + */ +typedef struct { + unsigned int version; + unsigned int environment; + unsigned int ccFeature; + unsigned int devToolsMode; + unsigned 
int multiGpuMode; +} nvmlSystemConfComputeSettings_v1_t; + +typedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t; +#define nvmlSystemConfComputeSettings_v1 NVML_STRUCT_VERSION(SystemConfComputeSettings, 1) + +/** + * Protected memory size + */ +typedef struct +nvmlConfComputeMemSizeInfo_st +{ + unsigned long long protectedMemSizeKib; + unsigned long long unprotectedMemSizeKib; +} nvmlConfComputeMemSizeInfo_t; + +/** + * Confidential Compute GPUs/System Ready State values + */ +#define NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE 0 +#define NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE 1 + +/** + * GPU Certificate Details + */ +#define NVML_GPU_CERT_CHAIN_SIZE 0x1000 +#define NVML_GPU_ATTESTATION_CERT_CHAIN_SIZE 0x1400 + +typedef struct nvmlConfComputeGpuCertificate_st { + unsigned int certChainSize; + unsigned int attestationCertChainSize; + unsigned char certChain[NVML_GPU_CERT_CHAIN_SIZE]; + unsigned char attestationCertChain[NVML_GPU_ATTESTATION_CERT_CHAIN_SIZE]; +} nvmlConfComputeGpuCertificate_t; + +/** + * GPU Attestation Report + */ +#define NVML_CC_GPU_CEC_NONCE_SIZE 0x20 +#define NVML_CC_GPU_ATTESTATION_REPORT_SIZE 0x2000 +#define NVML_CC_GPU_CEC_ATTESTATION_REPORT_SIZE 0x1000 +#define NVML_CC_CEC_ATTESTATION_REPORT_NOT_PRESENT 0 +#define NVML_CC_CEC_ATTESTATION_REPORT_PRESENT 1 +#define NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN 50 +#define NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX 75 + +typedef struct nvmlConfComputeGpuAttestationReport_st { + unsigned int isCecAttestationReportPresent; + unsigned int attestationReportSize; + unsigned int cecAttestationReportSize; + unsigned char nonce[NVML_CC_GPU_CEC_NONCE_SIZE]; + unsigned char attestationReport[NVML_CC_GPU_ATTESTATION_REPORT_SIZE]; + unsigned char cecAttestationReport[NVML_CC_GPU_CEC_ATTESTATION_REPORT_SIZE]; +} nvmlConfComputeGpuAttestationReport_t; + +typedef struct nvmlConfComputeSetKeyRotationThresholdInfo_st { + unsigned int version; + unsigned long long maxAttackerAdvantage; +} nvmlConfComputeSetKeyRotationThresholdInfo_v1_t; + +typedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t; +#define nvmlConfComputeSetKeyRotationThresholdInfo_v1 \ + NVML_STRUCT_VERSION(ConfComputeSetKeyRotationThresholdInfo, 1) + +typedef struct nvmlConfComputeGetKeyRotationThresholdInfo_st { + unsigned int version; + unsigned long long attackerAdvantage; +} nvmlConfComputeGetKeyRotationThresholdInfo_v1_t; + +typedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t; +#define nvmlConfComputeGetKeyRotationThresholdInfo_v1 \ + NVML_STRUCT_VERSION(ConfComputeGetKeyRotationThresholdInfo, 1) + +/** @} */ + +#define NVML_GPU_FABRIC_UUID_LEN 16 + +#define NVML_GPU_FABRIC_STATE_NOT_SUPPORTED 0 +#define NVML_GPU_FABRIC_STATE_NOT_STARTED 1 +#define NVML_GPU_FABRIC_STATE_IN_PROGRESS 2 +#define NVML_GPU_FABRIC_STATE_COMPLETED 3 + +typedef unsigned char nvmlGpuFabricState_t; + +typedef struct { + unsigned char clusterUuid[NVML_GPU_FABRIC_UUID_LEN]; //!< Uuid of the cluster to which this GPU belongs + nvmlReturn_t status; //!< Error status, if any. Must be checked only if state returns "complete". 
+ unsigned int cliqueId; //!< ID of the fabric clique to which this GPU belongs + nvmlGpuFabricState_t state; //!< Current state of GPU registration process +} nvmlGpuFabricInfo_t; + +#define NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED 0 +#define NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_TRUE 1 +#define NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_FALSE 2 + +#define NVML_GPU_FABRIC_HEALTH_MASK_SHIFT_DEGRADED_BW 0 +#define NVML_GPU_FABRIC_HEALTH_MASK_WIDTH_DEGRADED_BW 0x11 + +/** + * GPU Fabric Health Status Mask for various fields can be obtained + * using the below macro. + * Ex - NVML_GPU_FABRIC_HEALTH_GET(var, _DEGRADED_BW) + */ +#define NVML_GPU_FABRIC_HEALTH_GET(var, type) \ + (((var) >> NVML_GPU_FABRIC_HEALTH_MASK_SHIFT##type) & \ + (NVML_GPU_FABRIC_HEALTH_MASK_WIDTH##type)) + +/** + * GPU Fabric Health Status Mask for various fields can be tested + * using the below macro. + * Ex - NVML_GPU_FABRIC_HEALTH_TEST(var, _DEGRADED_BW, _TRUE) + */ +#define NVML_GPU_FABRIC_HEALTH_TEST(var, type, val) \ + (NVML_GPU_FABRIC_HEALTH_GET(var, type) == \ + NVML_GPU_FABRIC_HEALTH_MASK##type##val) + +/** +* GPU Fabric information (v2). +* +* Version 2 adds the \ref nvmlGpuFabricInfo_v2_t.version field +* to the start of the structure, and the \ref nvmlGpuFabricInfo_v2_t.healthMask +* field to the end. This structure is not backwards-compatible with +* \ref nvmlGpuFabricInfo_t. +*/ +typedef struct { + unsigned int version; //!< Structure version identifier (set to \ref nvmlGpuFabricInfo_v2) + unsigned char clusterUuid[NVML_GPU_FABRIC_UUID_LEN]; //!< Uuid of the cluster to which this GPU belongs + nvmlReturn_t status; //!< Error status, if any. Must be checked only if state returns "complete". + unsigned int cliqueId; //!< ID of the fabric clique to which this GPU belongs + nvmlGpuFabricState_t state; //!< Current state of GPU registration process + unsigned int healthMask; //!< GPU Fabric health Status Mask +} nvmlGpuFabricInfo_v2_t; + +typedef nvmlGpuFabricInfo_v2_t nvmlGpuFabricInfoV_t; + +/** +* Version identifier value for \ref nvmlGpuFabricInfo_v2_t.version. +*/ +#define nvmlGpuFabricInfo_v2 NVML_STRUCT_VERSION(GpuFabricInfo, 2) + +/** + * Device Scope - This is useful to retrieve the telemetry at GPU and module (e.g. GPU + CPU) level + */ +#define NVML_POWER_SCOPE_GPU 0U //!< Targets only GPU +#define NVML_POWER_SCOPE_MODULE 1U //!< Targets the whole module +#define NVML_POWER_SCOPE_MEMORY 2U //!< Targets the GPU Memory + +typedef unsigned char nvmlPowerScopeType_t; + +typedef struct +{ + unsigned int version; //!< Structure format version (must be 1) + nvmlPowerScopeType_t powerScope; //!< [in] Device type: GPU or Total Module + unsigned int powerValueMw; //!< [out] Power value to retrieve or set in milliwatts +} nvmlPowerValue_v2_t; + +#define nvmlPowerValue_v2 NVML_STRUCT_VERSION(PowerValue, 2) + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlInitializationAndCleanup Initialization and Cleanup + * This chapter describes the methods that handle NVML initialization and cleanup. + * It is the user's responsibility to call \ref nvmlInit_v2() before calling any other methods, and + * nvmlShutdown() once NVML is no longer being used. 
+ * @{ + */ +/***************************************************************************************************/ + +#define NVML_INIT_FLAG_NO_GPUS 1 //!< Don't fail nvmlInit() when no GPUs are found +#define NVML_INIT_FLAG_NO_ATTACH 2 //!< Don't attach GPUs + +/** + * Initialize NVML, but don't initialize any GPUs yet. + * + * \note nvmlInit_v3 introduces a "flags" argument, that allows passing boolean values + * modifying the behaviour of nvmlInit(). + * \note In NVML 5.319 new nvmlInit_v2 has replaced nvmlInit"_v1" (default in NVML 4.304 and older) that + * did initialize all GPU devices in the system. + * + * This allows NVML to communicate with a GPU + * when other GPUs in the system are unstable or in a bad state. When using this API, GPUs are + * discovered and initialized in nvmlDeviceGetHandleBy* functions instead. + * + * \note To contrast nvmlInit_v2 with nvmlInit"_v1", NVML 4.304 nvmlInit"_v1" will fail when any detected GPU is in + * a bad or unstable state. + * + * For all products. + * + * This method, should be called once before invoking any other methods in the library. + * A reference count of the number of initializations is maintained. Shutdown only occurs + * when the reference count reaches zero. + * + * @return + * - \ref NVML_SUCCESS if NVML has been properly initialized + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running + * - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlInit_v2(void); + +/** + * nvmlInitWithFlags is a variant of nvmlInit(), that allows passing a set of boolean values + * modifying the behaviour of nvmlInit(). + * Other than the "flags" parameter it is completely similar to \ref nvmlInit_v2. + * + * For all products. + * + * @param flags behaviour modifier flags + * + * @return + * - \ref NVML_SUCCESS if NVML has been properly initialized + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running + * - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlInitWithFlags(unsigned int flags); + +/** + * Shut down NVML by releasing all GPU resources previously allocated with \ref nvmlInit_v2(). + * + * For all products. + * + * This method should be called after NVML work is done, once for each call to \ref nvmlInit_v2() + * A reference count of the number of initializations is maintained. Shutdown only occurs + * when the reference count reaches zero. For backwards compatibility, no error is reported if + * nvmlShutdown() is called more times than nvmlInit(). + * + * @return + * - \ref NVML_SUCCESS if NVML has been properly shut down + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlShutdown(void); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlErrorReporting Error reporting + * This chapter describes helper functions for error reporting routines. + * @{ + */ +/***************************************************************************************************/ + +/** + * Helper method for converting NVML error codes into readable strings. + * + * For all products. + * + * @param result NVML error code to convert + * + * @return String representation of the error. 
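[Illustrative note, not part of the vendored header.] A minimal usage sketch for the initialization and cleanup entry points documented above, using nvmlInitWithFlags with the NVML_INIT_FLAG_NO_GPUS flag defined in this group and nvmlErrorString for diagnostics; nothing outside the declarations above is assumed.

    #include <stdio.h>
    #include "nvml.h"

    int main(void)
    {
        /* Tolerate hosts with no GPUs installed, per NVML_INIT_FLAG_NO_GPUS. */
        nvmlReturn_t ret = nvmlInitWithFlags(NVML_INIT_FLAG_NO_GPUS);
        if (ret != NVML_SUCCESS) {
            fprintf(stderr, "NVML init failed: %s\n", nvmlErrorString(ret));
            return 1;
        }

        /* ... issue queries here ... */

        /* Init and shutdown are reference counted: pair every successful
         * init with exactly one shutdown. */
        nvmlShutdown();
        return 0;
    }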
+ * + */ +const DECLDIR char* nvmlErrorString(nvmlReturn_t result); +/** @} */ + + +/***************************************************************************************************/ +/** @defgroup nvmlConstants Constants + * @{ + */ +/***************************************************************************************************/ + +/** + * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetInforomVersion and \ref nvmlDeviceGetInforomImageVersion + */ +#define NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE 16 + +/** + * Buffer size guaranteed to be large enough for storing GPU identifiers. + */ +#define NVML_DEVICE_UUID_BUFFER_SIZE 80 + +/** + * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetUUID + */ +#define NVML_DEVICE_UUID_V2_BUFFER_SIZE 96 + +/** + * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetBoardPartNumber + */ +#define NVML_DEVICE_PART_NUMBER_BUFFER_SIZE 80 + +/** + * Buffer size guaranteed to be large enough for \ref nvmlSystemGetDriverVersion + */ +#define NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE 80 + +/** + * Buffer size guaranteed to be large enough for \ref nvmlSystemGetNVMLVersion + */ +#define NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE 80 + +/** + * Buffer size guaranteed to be large enough for storing GPU device names. + */ +#define NVML_DEVICE_NAME_BUFFER_SIZE 64 + +/** + * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetName + */ +#define NVML_DEVICE_NAME_V2_BUFFER_SIZE 96 + +/** + * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetSerial + */ +#define NVML_DEVICE_SERIAL_BUFFER_SIZE 30 + +/** + * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetVbiosVersion + */ +#define NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE 32 + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlSystemQueries System Queries + * This chapter describes the queries that NVML can perform against the local system. These queries + * are not device-specific. + * @{ + */ +/***************************************************************************************************/ + +/** + * Retrieves the version of the system's graphics driver. + * + * For all products. + * + * The version identifier is an alphanumeric string. It will not exceed 80 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE. + * + * @param version Reference in which to return the version identifier + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + */ +nvmlReturn_t DECLDIR nvmlSystemGetDriverVersion(char *version, unsigned int length); + +/** + * Retrieves the version of the NVML library. + * + * For all products. + * + * The version identifier is an alphanumeric string. It will not exceed 80 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE. 
+ * + * @param version Reference in which to return the version identifier + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + */ +nvmlReturn_t DECLDIR nvmlSystemGetNVMLVersion(char *version, unsigned int length); + +/** + * Retrieves the version of the CUDA driver. + * + * For all products. + * + * The CUDA driver version returned will be retreived from the currently installed version of CUDA. + * If the cuda library is not found, this function will return a known supported version number. + * + * @param cudaDriverVersion Reference in which to return the version identifier + * + * @return + * - \ref NVML_SUCCESS if \a cudaDriverVersion has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL + */ +nvmlReturn_t DECLDIR nvmlSystemGetCudaDriverVersion(int *cudaDriverVersion); + +/** + * Retrieves the version of the CUDA driver from the shared library. + * + * For all products. + * + * The returned CUDA driver version by calling cuDriverGetVersion() + * + * @param cudaDriverVersion Reference in which to return the version identifier + * + * @return + * - \ref NVML_SUCCESS if \a cudaDriverVersion has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL + * - \ref NVML_ERROR_LIBRARY_NOT_FOUND if \a libcuda.so.1 or libcuda.dll is not found + * - \ref NVML_ERROR_FUNCTION_NOT_FOUND if \a cuDriverGetVersion() is not found in the shared library + */ +nvmlReturn_t DECLDIR nvmlSystemGetCudaDriverVersion_v2(int *cudaDriverVersion); + +/** + * Macros for converting the CUDA driver version number to Major and Minor version numbers. + */ +#define NVML_CUDA_DRIVER_VERSION_MAJOR(v) ((v)/1000) +#define NVML_CUDA_DRIVER_VERSION_MINOR(v) (((v)%1000)/10) + +/** + * Gets name of the process with provided process id + * + * For all products. + * + * Returned process name is cropped to provided length. + * name string is encoded in ANSI. + * + * @param pid The identifier of the process + * @param name Reference in which to return the process name + * @param length The maximum allowed length of the string returned in \a name + * + * @return + * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a name is NULL or \a length is 0. + * - \ref NVML_ERROR_NOT_FOUND if process doesn't exists + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlSystemGetProcessName(unsigned int pid, char *name, unsigned int length); + +/** + * Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system. + * + * For S-class products. + * + * The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array. + * The HIC must be connected to an S-class system for it to be reported by this function. 
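[Illustrative note, not part of the vendored header.] A short fragment showing the system-level version queries above, sized with the buffer constants from the Constants group and split with the CUDA driver version macros. It assumes <stdio.h>, "nvml.h", and a prior successful nvmlInit_v2/nvmlInitWithFlags call, as in the earlier sketch.

    char driver[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE];
    char nvmlVer[NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE];
    int cuda = 0;

    if (nvmlSystemGetDriverVersion(driver, (unsigned int)sizeof(driver)) == NVML_SUCCESS &&
        nvmlSystemGetNVMLVersion(nvmlVer, (unsigned int)sizeof(nvmlVer)) == NVML_SUCCESS)
        printf("driver %s, NVML %s\n", driver, nvmlVer);

    if (nvmlSystemGetCudaDriverVersion_v2(&cuda) == NVML_SUCCESS)
        /* e.g. 12040 -> major 12, minor 4 */
        printf("CUDA driver %d.%d\n",
               NVML_CUDA_DRIVER_VERSION_MAJOR(cuda),
               NVML_CUDA_DRIVER_VERSION_MINOR(cuda));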
+ * + * @param hwbcCount Size of hwbcEntries array + * @param hwbcEntries Array holding information about hwbc + * + * @return + * - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small + */ +nvmlReturn_t DECLDIR nvmlSystemGetHicVersion(unsigned int *hwbcCount, nvmlHwbcEntry_t *hwbcEntries); + +/** + * Retrieve the set of GPUs that have a CPU affinity with the given CPU number + * For all products. + * Supported on Linux only. + * + * @param cpuNumber The CPU number + * @param count When zero, is set to the number of matching GPUs such that \a deviceArray + * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count + * number of device handles. + * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber + * + * @return + * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ +nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int *count, nvmlDevice_t *deviceArray); + + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlUnitQueries Unit Queries + * This chapter describes that queries that NVML can perform against each unit. For S-class systems only. + * In each case the device is identified with an nvmlUnit_t handle. This handle is obtained by + * calling \ref nvmlUnitGetHandleByIndex(). + * @{ + */ +/***************************************************************************************************/ + + /** + * Retrieves the number of units in the system. + * + * For S-class products. + * + * @param unitCount Reference in which to return the number of units + * + * @return + * - \ref NVML_SUCCESS if \a unitCount has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetCount(unsigned int *unitCount); + +/** + * Acquire the handle for a particular unit, based on its index. + * + * For S-class products. + * + * Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount(). + * For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1. + * + * The order in which NVML enumerates units has no guarantees of consistency between reboots. 
+ * + * @param index The index of the target unit, >= 0 and < \a unitCount + * @param unit Reference in which to return the unit handle + * + * @return + * - \ref NVML_SUCCESS if \a unit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t *unit); + +/** + * Retrieves the static information associated with a unit. + * + * For S-class products. + * + * See \ref nvmlUnitInfo_t for details on available unit info. + * + * @param unit The identifier of the target unit + * @param info Reference in which to return the unit information + * + * @return + * - \ref NVML_SUCCESS if \a info has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL + */ +nvmlReturn_t DECLDIR nvmlUnitGetUnitInfo(nvmlUnit_t unit, nvmlUnitInfo_t *info); + +/** + * Retrieves the LED state associated with this unit. + * + * For S-class products. + * + * See \ref nvmlLedState_t for details on allowed states. + * + * @param unit The identifier of the target unit + * @param state Reference in which to return the current LED state + * + * @return + * - \ref NVML_SUCCESS if \a state has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a state is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlUnitSetLedState() + */ +nvmlReturn_t DECLDIR nvmlUnitGetLedState(nvmlUnit_t unit, nvmlLedState_t *state); + +/** + * Retrieves the PSU stats for the unit. + * + * For S-class products. + * + * See \ref nvmlPSUInfo_t for details on available PSU info. + * + * @param unit The identifier of the target unit + * @param psu Reference in which to return the PSU information + * + * @return + * - \ref NVML_SUCCESS if \a psu has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetPsuInfo(nvmlUnit_t unit, nvmlPSUInfo_t *psu); + +/** + * Retrieves the temperature readings for the unit, in degrees C. + * + * For S-class products. + * + * Depending on the product, readings may be available for intake (type=0), + * exhaust (type=1) and board (type=2). + * + * @param unit The identifier of the target unit + * @param type The type of reading to take + * @param temp Reference in which to return the intake temperature + * + * @return + * - \ref NVML_SUCCESS if \a temp has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetTemperature(nvmlUnit_t unit, unsigned int type, unsigned int *temp); + +/** + * Retrieves the fan speed readings for the unit. + * + * For S-class products. 
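[Illustrative note, not part of the vendored header.] The S-class unit queries above follow the same count-then-index pattern as device enumeration; a brief fragment under the same assumptions as the previous sketch (<stdio.h>, "nvml.h", NVML already initialized):

    unsigned int unitCount = 0, intake = 0;
    if (nvmlUnitGetCount(&unitCount) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < unitCount; i++) {
            nvmlUnit_t unit;
            if (nvmlUnitGetHandleByIndex(i, &unit) != NVML_SUCCESS)
                continue;
            /* type 0 = intake, 1 = exhaust, 2 = board (product dependent) */
            if (nvmlUnitGetTemperature(unit, 0, &intake) == NVML_SUCCESS)
                printf("unit %u intake temperature: %u C\n", i, intake);
        }
    }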
+ * + * See \ref nvmlUnitFanSpeeds_t for details on available fan speed info. + * + * @param unit The identifier of the target unit + * @param fanSpeeds Reference in which to return the fan speed information + * + * @return + * - \ref NVML_SUCCESS if \a fanSpeeds has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a fanSpeeds is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetFanSpeedInfo(nvmlUnit_t unit, nvmlUnitFanSpeeds_t *fanSpeeds); + +/** + * Retrieves the set of GPU devices that are attached to the specified unit. + * + * For S-class products. + * + * The \a deviceCount argument is expected to be set to the size of the input \a devices array. + * + * @param unit The identifier of the target unit + * @param deviceCount Reference in which to provide the \a devices array size, and + * to return the number of attached GPU devices + * @param devices Reference in which to return the references to the attached GPU devices + * + * @return + * - \ref NVML_SUCCESS if \a deviceCount and \a devices have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a deviceCount indicates that the \a devices array is too small + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid, either of \a deviceCount or \a devices is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetDevices(nvmlUnit_t unit, unsigned int *deviceCount, nvmlDevice_t *devices); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceQueries Device Queries + * This chapter describes that queries that NVML can perform against each device. + * In each case the device is identified with an nvmlDevice_t handle. This handle is obtained by + * calling one of \ref nvmlDeviceGetHandleByIndex_v2(), \ref nvmlDeviceGetHandleBySerial(), + * \ref nvmlDeviceGetHandleByPciBusId_v2(). or \ref nvmlDeviceGetHandleByUUID(). + * @{ + */ +/***************************************************************************************************/ + + /** + * Retrieves the number of compute devices in the system. A compute device is a single GPU. + * + * For all products. + * + * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system + * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. + * Update your code to handle this error, or use NVML 4.304 or older nvml header file. + * For backward binary compatibility reasons _v1 version of the API is still present in the shared + * library. + * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. + * + * @param deviceCount Reference in which to return the number of accessible devices + * + * @return + * - \ref NVML_SUCCESS if \a deviceCount has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCount_v2(unsigned int *deviceCount); + +/** + * Get attributes (engine counts etc.) for the given NVML device handle. 
+ * + * @note This API currently only supports MIG device handles. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device NVML device handle + * @param attributes Device attributes + * + * @return + * - \ref NVML_SUCCESS if \a device attributes were successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle is invalid + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAttributes_v2(nvmlDevice_t device, nvmlDeviceAttributes_t *attributes); + +/** + * Acquire the handle for a particular device, based on its index. + * + * For all products. + * + * Valid indices are derived from the \a accessibleDevices count returned by + * \ref nvmlDeviceGetCount_v2(). For example, if \a accessibleDevices is 2 the valid indices + * are 0 and 1, corresponding to GPU 0 and GPU 1. + * + * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it + * is recommended that devices be looked up by their PCI ids or UUID. See + * \ref nvmlDeviceGetHandleByUUID() and \ref nvmlDeviceGetHandleByPciBusId_v2(). + * + * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs if: + * - The target GPU is an SLI slave + * + * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system + * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. + * Update your code to handle this error, or use NVML 4.304 or older nvml header file. + * For backward binary compatibility reasons _v1 version of the API is still present in the shared + * library. + * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. + * + * This means that nvmlDeviceGetHandleByIndex_v2 and _v1 can return different devices for the same index. + * If you don't touch macros that map old (_v1) versions to _v2 versions at the top of the file you don't + * need to worry about that. + * + * @param index The index of the target GPU, >= 0 and < \a accessibleDevices + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a device is NULL + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetIndex + * @see nvmlDeviceGetCount + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex_v2(unsigned int index, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its board serial number. + * + * For Fermi &tm; or newer fully supported devices. 
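[Illustrative note, not part of the vendored header.] A sketch of the enumeration pattern described above, including the NVML_ERROR_NO_PERMISSION case that the nvmlDeviceGetCount_v2 documentation warns about; this is broadly the loop a consumer of these bindings would run, shown here directly against the C declarations.

    #include "nvml.h"

    static void list_accessible_gpus(void)
    {
        unsigned int count = 0;
        if (nvmlDeviceGetCount_v2(&count) != NVML_SUCCESS)
            return;

        for (unsigned int i = 0; i < count; i++) {
            nvmlDevice_t device;
            nvmlReturn_t ret = nvmlDeviceGetHandleByIndex_v2(i, &device);
            if (ret == NVML_ERROR_NO_PERMISSION)
                continue;   /* counted by _v2, but not accessible to this caller */
            if (ret != NVML_SUCCESS)
                break;
            /* ... query `device` here ... */
        }
    }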
+ * + * This number corresponds to the value printed directly on the board, and to the value returned by + * \ref nvmlDeviceGetSerial(). + * + * @deprecated Since more than one GPU can exist on a single board this function is deprecated in favor + * of \ref nvmlDeviceGetHandleByUUID. + * For dual GPU boards this function will return NVML_ERROR_INVALID_ARGUMENT. + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs as it searches for the target GPU + * + * @param serial The board serial number of the target GPU + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a serial is invalid, \a device is NULL or more than one + * device has the same serial (dual GPU boards) + * - \ref NVML_ERROR_NOT_FOUND if \a serial does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetSerial + * @see nvmlDeviceGetHandleByUUID + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device. + * + * For all products. + * + * @param uuid The UUID of the target GPU or MIG instance + * @param device Reference in which to return the device handle or MIG device handle + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs as it searches for the target GPU + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid or \a device is null + * - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetUUID + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUID(const char *uuid, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its PCI bus id. + * + * For all products. + * + * This value corresponds to the nvmlPciInfo_t::busId returned by \ref nvmlDeviceGetPciInfo_v3(). + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs if: + * - The target GPU is an SLI slave + * + * \note NVML 4.304 and older version of nvmlDeviceGetHandleByPciBusId"_v1" returns NVML_ERROR_NOT_FOUND + * instead of NVML_ERROR_NO_PERMISSION. 
+ * + * @param pciBusId The PCI bus id of the target GPU + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciBusId is invalid or \a device is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a pciBusId does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if the attached device has improperly attached external power cables + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByPciBusId_v2(const char *pciBusId, nvmlDevice_t *device); + +/** + * Retrieves the name of this device. + * + * For all products. + * + * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not + * exceed 96 characters in length (including the NULL terminator). See \ref + * nvmlConstants::NVML_DEVICE_NAME_V2_BUFFER_SIZE. + * + * When used with MIG device handles the API returns MIG device names which can be used to identify devices + * based on their attributes. + * + * @param device The identifier of the target device + * @param name Reference in which to return the product name + * @param length The maximum allowed length of the string returned in \a name + * + * @return + * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetName(nvmlDevice_t device, char *name, unsigned int length); + +/** + * Retrieves the brand of this device. + * + * For all products. + * + * The type is a member of \ref nvmlBrandType_t defined above. + * + * @param device The identifier of the target device + * @param type Reference in which to return the product brand type + * + * @return + * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a type is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBrand(nvmlDevice_t device, nvmlBrandType_t *type); + +/** + * Retrieves the NVML index of this device. + * + * For all products. + * + * Valid indices are derived from the \a accessibleDevices count returned by + * \ref nvmlDeviceGetCount_v2(). For example, if \a accessibleDevices is 2 the valid indices + * are 0 and 1, corresponding to GPU 0 and GPU 1. + * + * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it + * is recommended that devices be looked up by their PCI ids or GPU UUID. 
See + * \ref nvmlDeviceGetHandleByPciBusId_v2() and \ref nvmlDeviceGetHandleByUUID(). + * + * When used with MIG device handles this API returns indices that can be + * passed to \ref nvmlDeviceGetMigDeviceHandleByIndex to retrieve an identical handle. + * MIG device indices are unique within a device. + * + * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. + * + * @param device The identifier of the target device + * @param index Reference in which to return the NVML index of the device + * + * @return + * - \ref NVML_SUCCESS if \a index has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetHandleByIndex() + * @see nvmlDeviceGetCount() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetIndex(nvmlDevice_t device, unsigned int *index); + +/** + * Retrieves the globally unique board serial number associated with this device's board. + * + * For all products with an inforom. + * + * The serial number is an alphanumeric string that will not exceed 30 characters (including the NULL terminator). + * This number matches the serial number tag that is physically attached to the board. See \ref + * nvmlConstants::NVML_DEVICE_SERIAL_BUFFER_SIZE. + * + * @param device The identifier of the target device + * @param serial Reference in which to return the board/module serial number + * @param length The maximum allowed length of the string returned in \a serial + * + * @return + * - \ref NVML_SUCCESS if \a serial has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSerial(nvmlDevice_t device, char *serial, unsigned int length); + +/* +* Get a unique identifier for the device module on the baseboard +* +* This API retrieves a unique identifier for each GPU module that exists on a given baseboard. +* For non-baseboard products, this ID would always be 0. 
+* +* @param device The identifier of the target device +* @param moduleId Unique identifier for the GPU module +* +* @return +* - \ref NVML_SUCCESS if \a moduleId has been successfully retrieved +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a moduleId is invalid +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetModuleId(nvmlDevice_t device, unsigned int *moduleId); + +/** + * Retrieves the Device's C2C Mode information + * + * @param device The identifier of the target device + * @param c2cModeInfo Output struct containing the device's C2C Mode info + * + * @return + * - \ref NVML_SUCCESS if \a C2C Mode Infor query is successful + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetC2cModeInfoV(nvmlDevice_t device, nvmlC2cModeInfo_v1_t *c2cModeInfo); + +/***************************************************************************************************/ + +/** @defgroup nvmlAffinity CPU and Memory Affinity + * This chapter describes NVML operations that are associated with CPU and memory + * affinity. + * @{ + */ +/***************************************************************************************************/ + +//! Scope of NUMA node for affinity queries +#define NVML_AFFINITY_SCOPE_NODE 0 +//! Scope of processor socket for affinity queries +#define NVML_AFFINITY_SCOPE_SOCKET 1 + +typedef unsigned int nvmlAffinityScope_t; + +/** + * Retrieves an array of unsigned ints (sized to nodeSetSize) of bitmasks with + * the ideal memory affinity within node or socket for the device. + * For example, if NUMA node 0, 1 are ideal within the socket for the device and nodeSetSize == 1, + * result[0] = 0x3 + * + * \note If requested scope is not applicable to the target topology, the API + * will fall back to reporting the memory affinity for the immediate non-I/O + * ancestor of the device. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param nodeSetSize The size of the nodeSet array that is safe to access + * @param nodeSet Array reference in which to return a bitmask of NODEs, 64 NODEs per + * unsigned long on 64-bit machines, 32 on 32-bit machines + * @param scope Scope that change the default behavior + * + * @return + * - \ref NVML_SUCCESS if \a NUMA node Affinity has been filled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, nodeSetSize == 0, nodeSet is NULL or scope is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ + +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryAffinity(nvmlDevice_t device, unsigned int nodeSetSize, unsigned long *nodeSet, nvmlAffinityScope_t scope); + +/** + * Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the + * ideal CPU affinity within node or socket for the device. 
+ * For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2, + * result[0] = 0x3, result[1] = 0x3 + * + * \note If requested scope is not applicable to the target topology, the API + * will fall back to reporting the CPU affinity for the immediate non-I/O + * ancestor of the device. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param cpuSetSize The size of the cpuSet array that is safe to access + * @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per + * unsigned long on 64-bit machines, 32 on 32-bit machines + * @param scope Scope that change the default behavior + * + * @return + * - \ref NVML_SUCCESS if \a cpuAffinity has been filled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, cpuSet is NULL or sope is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ + +nvmlReturn_t DECLDIR nvmlDeviceGetCpuAffinityWithinScope(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long *cpuSet, nvmlAffinityScope_t scope); + +/** + * Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the ideal CPU affinity for the device + * For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2, + * result[0] = 0x3, result[1] = 0x3 + * This is equivalent to calling \ref nvmlDeviceGetCpuAffinityWithinScope with \ref NVML_AFFINITY_SCOPE_NODE. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param cpuSetSize The size of the cpuSet array that is safe to access + * @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per + * unsigned long on 64-bit machines, 32 on 32-bit machines + * + * @return + * - \ref NVML_SUCCESS if \a cpuAffinity has been filled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, or cpuSet is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long *cpuSet); + +/** + * Sets the ideal affinity for the calling thread and device using the guidelines + * given in nvmlDeviceGetCpuAffinity(). Note, this is a change as of version 8.0. + * Older versions set the affinity for a calling process and all children. + * Currently supports up to 1024 processors. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. 
+ * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if the calling process has been successfully bound + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetCpuAffinity(nvmlDevice_t device); + +/** + * Clear all affinity bindings for the calling thread. Note, this is a change as of version + * 8.0 as older versions cleared the affinity for a calling process and all children. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if the calling process has been successfully unbound + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceClearCpuAffinity(nvmlDevice_t device); + +/** + * Get the NUMA node of the given GPU device. + * This only applies to platforms where the GPUs are NUMA nodes. + * + * @param[in] device The device handle + * @param[out] node NUMA node ID of the device + * + * @returns + * - \ref NVML_SUCCESS if the NUMA node is retrieved successfully + * - \ref NVML_ERROR_NOT_SUPPORTED if request is not supported on the current platform + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device \a node is invalid + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumaNodeId(nvmlDevice_t device, unsigned int *node); +/** + * Retrieve the common ancestor for two devices + * For all products. + * Supported on Linux only. + * + * @param device1 The identifier of the first device + * @param device2 The identifier of the second device + * @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type + * + * @return + * - \ref NVML_SUCCESS if \a pathInfo has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ + +/** @} */ +nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t *pathInfo); + +/** + * Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level + * For all products. + * Supported on Linux only. + * + * @param device The identifier of the first device + * @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs + * @param count When zero, is set to the number of matching GPUs such that \a deviceArray + * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count + * number of device handles. 
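[Illustrative note, not part of the vendored header.] A sketch of the affinity helpers documented above: it reads the ideal CPU bitmask for a device and then binds the calling thread to it. The two-element array size is an assumption for a host with up to 128 CPUs; <stdio.h>, "nvml.h", and an initialized NVML are assumed as in the earlier sketches.

    static void bind_to_ideal_cpus(nvmlDevice_t device)
    {
        /* Two unsigned longs cover 128 CPUs on 64-bit hosts; size for the target. */
        unsigned long cpuSet[2] = { 0, 0 };

        if (nvmlDeviceGetCpuAffinity(device, 2, cpuSet) == NVML_SUCCESS)
            printf("ideal CPU mask: 0x%lx%016lx\n", cpuSet[1], cpuSet[0]);

        /* Since NVML 8.0 this binds only the calling thread, not the whole process. */
        nvmlDeviceSetCpuAffinity(device);
    }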
+ * @param deviceArray An array of device handles for GPUs found at \a level + * + * @return + * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int *count, nvmlDevice_t *deviceArray); + +/** + * Retrieve the status for a given p2p capability index between a given pair of GPU + * + * @param device1 The first device + * @param device2 The second device + * @param p2pIndex p2p Capability Index being looked for between \a device1 and \a device2 + * @param p2pStatus Reference in which to return the status of the \a p2pIndex + * between \a device1 and \a device2 + * @return + * - \ref NVML_SUCCESS if \a p2pStatus has been populated + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 or \a p2pIndex is invalid or \a p2pStatus is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex,nvmlGpuP2PStatus_t *p2pStatus); + +/** + * Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, + * that augments the immutable, board serial identifier. + * + * For all products. + * + * The UUID is a globally unique identifier. It is the only available identifier for pre-Fermi-architecture products. + * It does NOT correspond to any identifier printed on the board. It will not exceed 96 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_UUID_V2_BUFFER_SIZE. + * + * When used with MIG device handles the API returns globally unique UUIDs which can be used to identify MIG + * devices across both GPU and MIG devices. UUIDs are immutable for the lifetime of a MIG device. + * + * @param device The identifier of the target device + * @param uuid Reference in which to return the GPU UUID + * @param length The maximum allowed length of the string returned in \a uuid + * + * @return + * - \ref NVML_SUCCESS if \a uuid has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a uuid is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length); + +/** + * Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for + * each GPU will have the form /dev/nvidia[minor number]. + * + * For all products. 
+ * Supported only for Linux
+ *
+ * @param device                                The identifier of the target device
+ * @param minorNumber                           Reference in which to return the minor number for the device
+ * @return
+ *         - \ref NVML_SUCCESS                 if the minor number is successfully retrieved
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a minorNumber is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if this query is not supported by the device
+ *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int *minorNumber);
+
+/**
+ * Retrieves the device board part number which is programmed into the board's InfoROM
+ *
+ * For all products.
+ *
+ * @param device                                Identifier of the target device
+ * @param partNumber                            Reference to the buffer in which to return the part number
+ * @param length                                Length of the buffer reference
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                  if \a partNumber has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED      if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_NOT_SUPPORTED      if the needed VBIOS fields have not been filled
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT   if \a device is invalid or \a partNumber is NULL
+ *         - \ref NVML_ERROR_GPU_IS_LOST        if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN            on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length);
+
+/**
+ * Retrieves the version information for the device's infoROM object.
+ *
+ * For all products with an inforom.
+ *
+ * Fermi and higher parts have non-volatile on-board memory for persisting device info, such as aggregate
+ * ECC counts. The version of the data structures in this memory may change from time to time. It will not
+ * exceed 16 characters in length (including the NULL terminator).
+ * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE.
+ *
+ * See \ref nvmlInforomObject_t for details on the available infoROM objects.
+ *
+ * @param device                               The identifier of the target device
+ * @param object                               The target infoROM object
+ * @param version                              Reference in which to return the infoROM version
+ * @param length                               The maximum allowed length of the string returned in \a version
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a version has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a version is NULL
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not have an infoROM
+ *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ *
+ * @see nvmlDeviceGetInforomImageVersion
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char *version, unsigned int length);
+
+/**
+ * Retrieves the global infoROM image version
+ *
+ * For all products with an inforom.
+ *
+ * Image version, just like VBIOS version, uniquely describes the exact version of the infoROM flashed on the board,
+ * in contrast to infoROM object version which is only an indicator of supported features.
+ * Version string will not exceed 16 characters in length (including the NULL terminator). + * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. + * + * @param device The identifier of the target device + * @param version Reference in which to return the infoROM image version + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetInforomVersion + */ +nvmlReturn_t DECLDIR nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char *version, unsigned int length); + +/** + * Retrieves the checksum of the configuration stored in the device's infoROM. + * + * For all products with an inforom. + * + * Can be used to make sure that two GPUs have the exact same configuration. + * Current checksum takes into account configuration stored in PWR and ECC infoROM objects. + * Checksum can change between driver releases or when user changes configuration (e.g. disable/enable ECC) + * + * @param device The identifier of the target device + * @param checksum Reference in which to return the infoROM configuration checksum + * + * @return + * - \ref NVML_SUCCESS if \a checksum has been set + * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's checksum couldn't be retrieved due to infoROM corruption + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a checksum is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int *checksum); + +/** + * Reads the infoROM from the flash and verifies the checksums. + * + * For all products with an inforom. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if infoROM is not corrupted + * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's infoROM is corrupted + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceValidateInforom(nvmlDevice_t device); + +/** + * Retrieves the timestamp and the duration of the last flush of the BBX (blackbox) infoROM object during the current run. + * + * For all products with an inforom. 
+ * + * @param device The identifier of the target device + * @param timestamp The start timestamp of the last BBX Flush + * @param durationUs The duration (us) of the last BBX Flush + * + * @return + * - \ref NVML_SUCCESS if \a timestamp and \a durationUs are successfully retrieved + * - \ref NVML_ERROR_NOT_READY if the BBX object has not been flushed yet + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetInforomVersion + */ +nvmlReturn_t DECLDIR nvmlDeviceGetLastBBXFlushTime(nvmlDevice_t device, unsigned long long *timestamp, + unsigned long *durationUs); + +/** + * Retrieves the display mode for the device. + * + * For all products. + * + * This method indicates whether a physical display (e.g. monitor) is currently connected to + * any of the device's connectors. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param display Reference in which to return the display mode + * + * @return + * - \ref NVML_SUCCESS if \a display has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a display is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t *display); + +/** + * Retrieves the display active state for the device. + * + * For all products. + * + * This method indicates whether a display is initialized on the device. + * For example whether X Server is attached to this device and has allocated memory for the screen. + * + * Display can be active even when no monitor is physically attached. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param isActive Reference in which to return the display active state + * + * @return + * - \ref NVML_SUCCESS if \a isActive has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isActive is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t *isActive); + +/** + * Retrieves the persistence mode associated with this device. + * + * For all products. + * For Linux only. + * + * When driver persistence mode is enabled the driver software state is not torn down when the last + * client disconnects. By default this feature is disabled. + * + * See \ref nvmlEnableState_t for details on allowed modes. 
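+ *
+ * A minimal query sketch (illustrative only; assumes a valid \a device handle):
+ *
+ * \code
+ * nvmlEnableState_t mode;
+ * if (nvmlDeviceGetPersistenceMode(device, &mode) == NVML_SUCCESS)
+ * {
+ *     // mode is NVML_FEATURE_ENABLED when the driver keeps its state loaded
+ *     // after the last client disconnects.
+ * }
+ * \endcode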
+ * + * @param device The identifier of the target device + * @param mode Reference in which to return the current driver persistence mode + * + * @return + * - \ref NVML_SUCCESS if \a mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetPersistenceMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t *mode); + +/** + * Retrieves PCI attributes of this device. + * + * For all products. + * + * See \ref nvmlPciInfoExt_t for details on the available PCI info. + * + * @param device The identifier of the target device + * @param pci Reference in which to return the PCI info + * + * @return + * - \ref NVML_SUCCESS if \a pci has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPciInfoExt(nvmlDevice_t device, nvmlPciInfoExt_t *pci); + +/** + * Retrieves the PCI attributes of this device. + * + * For all products. + * + * See \ref nvmlPciInfo_t for details on the available PCI info. + * + * @param device The identifier of the target device + * @param pci Reference in which to return the PCI info + * + * @return + * - \ref NVML_SUCCESS if \a pci has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo_v3(nvmlDevice_t device, nvmlPciInfo_t *pci); + +/** + * Retrieves the maximum PCIe link generation possible with this device and system + * + * I.E. for a generation 2 PCIe device attached to a generation 1 PCIe bus the max link generation this function will + * report is generation 1. + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param maxLinkGen Reference in which to return the max PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a maxLinkGen has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGen is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGen); + +/** + * Retrieves the maximum PCIe link generation supported by this device + * + * For Fermi &tm; or newer fully supported devices. 
+ * + * @param device The identifier of the target device + * @param maxLinkGenDevice Reference in which to return the max PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a maxLinkGenDevice has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGenDevice is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGenDevice); + +/** + * Retrieves the maximum PCIe link width possible with this device and system + * + * I.E. for a device with a 16x PCIe bus width attached to a 8x PCIe system bus this function will report + * a max link width of 8. + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param maxLinkWidth Reference in which to return the max PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a maxLinkWidth has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkWidth is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int *maxLinkWidth); + +/** + * Retrieves the current PCIe link generation + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param currLinkGen Reference in which to return the current PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a currLinkGen has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkGen is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int *currLinkGen); + +/** + * Retrieves the current PCIe link width + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param currLinkWidth Reference in which to return the current PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a currLinkWidth has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int *currLinkWidth); + +/** + * Retrieve PCIe utilization information. 
+ * This function is querying a byte counter over a 20ms interval and thus is the + * PCIe throughput over that interval. + * + * For Maxwell &tm; or newer fully supported devices. + * + * This method is not supported in virtual machines running virtual GPU (vGPU). + * + * @param device The identifier of the target device + * @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t + * @param value Reference in which to return throughput in KB/s + * + * @return + * - \ref NVML_SUCCESS if \a value has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int *value); + +/** + * Retrieve the PCIe replay counter. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param value Reference in which to return the counter's value + * + * @return + * - \ref NVML_SUCCESS if \a value has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int *value); + +/** + * Retrieves the current clock speeds for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlClockType_t for details on available clock information. + * + * @param device The identifier of the target device + * @param type Identify which clock domain to query + * @param clock Reference in which to return the clock speed in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clock has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); + +/** + * Retrieves the maximum clock speeds for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlClockType_t for details on available clock information. + * + * \note On GPUs from Fermi family current P0 clocks (reported by \ref nvmlDeviceGetClockInfo) can differ from max clocks + * by few MHz. 
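+ *
+ * A minimal sketch comparing current and maximum clocks (illustrative only; assumes
+ * a valid \a device handle; NVML_CLOCK_SM is used here as an example clock domain):
+ *
+ * \code
+ * unsigned int currMHz = 0, maxMHz = 0;
+ * if (nvmlDeviceGetClockInfo(device, NVML_CLOCK_SM, &currMHz) == NVML_SUCCESS &&
+ *     nvmlDeviceGetMaxClockInfo(device, NVML_CLOCK_SM, &maxMHz) == NVML_SUCCESS)
+ * {
+ *     // currMHz is the present SM clock, maxMHz the maximum the device reports.
+ * }
+ * \endcode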
+ * + * @param device The identifier of the target device + * @param type Identify which clock domain to query + * @param clock Reference in which to return the clock speed in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clock has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); + +/** + * Retrieve the GPCCLK VF offset value + * @param[in] device The identifier of the target device + * @param[out] offset The retrieved GPCCLK VF offset value + * + * @return + * - \ref NVML_SUCCESS if \a offset has been successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpcClkVfOffset(nvmlDevice_t device, int *offset); + +/** + * Retrieves the current setting of a clock that applications will use unless an overspec situation occurs. + * Can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); + +/** + * Retrieves the default applications clock that GPU boots with or + * defaults to after \ref nvmlDeviceResetApplicationsClocks call. + * + * For Kepler &tm; or newer fully supported devices. 
+ * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockMHz Reference in which to return the default clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * \see nvmlDeviceGetApplicationsClock + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDefaultApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); + +/** + * Retrieves the clock speed for the clock specified by the clock type and clock ID. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockId Identify which clock in the domain to query + * @param clockMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClock(nvmlDevice_t device, nvmlClockType_t clockType, nvmlClockId_t clockId, unsigned int *clockMHz); + +/** + * Retrieves the customer defined maximum boost clock speed specified by the given clock type. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or the \a clockType on this device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxCustomerBoostClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); + +/** + * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. + * + * For Kepler &tm; or newer fully supported devices. 
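+ *
+ * A minimal sketch of the count/array query pattern (illustrative only; assumes a
+ * valid \a device handle; the fixed buffer size of 32 is an arbitrary choice):
+ *
+ * \code
+ * unsigned int count = 32;
+ * unsigned int clocksMHz[32];
+ * nvmlReturn_t ret = nvmlDeviceGetSupportedMemoryClocks(device, &count, clocksMHz);
+ * if (ret == NVML_SUCCESS)
+ * {
+ *     // clocksMHz[0..count-1] holds the supported memory clocks.
+ * }
+ * else if (ret == NVML_ERROR_INSUFFICIENT_SIZE)
+ * {
+ *     // count now reports how many elements are actually required.
+ * }
+ * \endcode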
+ *
+ * @param device                                The identifier of the target device
+ * @param count                                 Reference in which to provide the \a clocksMHz array size, and
+ *                                              to return the number of elements
+ * @param clocksMHz                             Reference in which to return the clock in MHz
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                  if \a count and \a clocksMHz have been populated
+ *         - \ref NVML_ERROR_UNINITIALIZED      if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT   if \a device is invalid or \a count is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED      if the device does not support this feature
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE  if \a count is too small (\a count is set to the number of
+ *                                                required elements)
+ *         - \ref NVML_ERROR_GPU_IS_LOST        if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN            on any unexpected error
+ *
+ * @see nvmlDeviceSetApplicationsClocks
+ * @see nvmlDeviceGetSupportedGraphicsClocks
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks(nvmlDevice_t device, unsigned int *count, unsigned int *clocksMHz);
+
+/**
+ * Retrieves the list of possible graphics clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param device                                The identifier of the target device
+ * @param memoryClockMHz                        Memory clock for which to return possible graphics clocks
+ * @param count                                 Reference in which to provide the \a clocksMHz array size, and
+ *                                              to return the number of elements
+ * @param clocksMHz                             Reference in which to return the clocks in MHz
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                  if \a count and \a clocksMHz have been populated
+ *         - \ref NVML_ERROR_UNINITIALIZED      if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_NOT_FOUND          if the specified \a memoryClockMHz is not a supported frequency
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT   if \a device is invalid or \a clock is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED      if the device does not support this feature
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE  if \a count is too small
+ *         - \ref NVML_ERROR_GPU_IS_LOST        if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN            on any unexpected error
+ *
+ * @see nvmlDeviceSetApplicationsClocks
+ * @see nvmlDeviceGetSupportedMemoryClocks
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t device, unsigned int memoryClockMHz, unsigned int *count, unsigned int *clocksMHz);
+
+/**
+ * Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ * to maximize performance as thermal limits allow.
+ *
+ * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ * behavior.
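+ *
+ * A minimal query sketch (illustrative only; assumes a valid \a device handle):
+ *
+ * \code
+ * nvmlEnableState_t isEnabled, defaultIsEnabled;
+ * if (nvmlDeviceGetAutoBoostedClocksEnabled(device, &isEnabled, &defaultIsEnabled) == NVML_SUCCESS)
+ * {
+ *     // isEnabled reflects the current Auto Boost state, defaultIsEnabled the
+ *     // state the device reverts to when no applications are using the GPU.
+ * }
+ * \endcode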
+ *
+ * @param device                               The identifier of the target device
+ * @param isEnabled                            Where to store the current state of Auto Boosted clocks of the target device
+ * @param defaultIsEnabled                     Where to store the default Auto Boosted clocks behavior of the target device that the device will
+ *                                                 revert to when no applications are using the GPU
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 If \a isEnabled has been set with the Auto Boosted clocks state of \a device
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a isEnabled is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support Auto Boosted clocks
+ *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t *isEnabled, nvmlEnableState_t *defaultIsEnabled);
+
+/**
+ * Retrieves the intended operating speed of the device's fan.
+ *
+ * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the
+ * output will not match the actual fan speed.
+ *
+ * For all discrete products with dedicated fans.
+ *
+ * The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ * This value may exceed 100% in certain cases.
+ *
+ * @param device                                The identifier of the target device
+ * @param speed                                 Reference in which to return the fan speed percentage
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a speed has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a speed is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not have a fan
+ *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *speed);
+
+
+/**
+ * Retrieves the intended operating speed of the device's specified fan.
+ *
+ * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the
+ * output will not match the actual fan speed.
+ *
+ * For all discrete products with dedicated fans.
+ *
+ * The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ * This value may exceed 100% in certain cases.
+ *
+ * @param device                                The identifier of the target device
+ * @param fan                                   The index of the target fan, zero indexed.
+ * @param speed                                 Reference in which to return the fan speed percentage
+ *
+ * @return
+ *        - \ref NVML_SUCCESS                   if \a speed has been set
+ *        - \ref NVML_ERROR_UNINITIALIZED       if the library has not been successfully initialized
+ *        - \ref NVML_ERROR_INVALID_ARGUMENT    if \a device is invalid, \a fan is not an acceptable index, or \a speed is NULL
+ *        - \ref NVML_ERROR_NOT_SUPPORTED       if the device does not have a fan or is newer than Maxwell
+ *        - \ref NVML_ERROR_GPU_IS_LOST         if the target GPU has fallen off the bus or is otherwise inaccessible
+ *        - \ref NVML_ERROR_UNKNOWN             on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int * speed);
+
+/**
+ * Retrieves the intended target speed of the device's specified fan.
+ *
+ * Normally, the driver dynamically adjusts the fan based on
+ * the needs of the GPU. But when the user sets the fan speed using nvmlDeviceSetFanSpeed_v2,
+ * the driver will attempt to make the fan achieve the setting requested in
+ * nvmlDeviceSetFanSpeed_v2. The actual current speed of the fan
+ * is reported in nvmlDeviceGetFanSpeed_v2.
+ *
+ * For all discrete products with dedicated fans.
+ *
+ * The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ * This value may exceed 100% in certain cases.
+ *
+ * @param device                                The identifier of the target device
+ * @param fan                                   The index of the target fan, zero indexed.
+ * @param targetSpeed                           Reference in which to return the fan speed percentage
+ *
+ * @return
+ *        - \ref NVML_SUCCESS                   if \a targetSpeed has been set
+ *        - \ref NVML_ERROR_UNINITIALIZED       if the library has not been successfully initialized
+ *        - \ref NVML_ERROR_INVALID_ARGUMENT    if \a device is invalid, \a fan is not an acceptable index, or \a targetSpeed is NULL
+ *        - \ref NVML_ERROR_NOT_SUPPORTED       if the device does not have a fan or is newer than Maxwell
+ *        - \ref NVML_ERROR_GPU_IS_LOST         if the target GPU has fallen off the bus or is otherwise inaccessible
+ *        - \ref NVML_ERROR_UNKNOWN             on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetTargetFanSpeed(nvmlDevice_t device, unsigned int fan, unsigned int *targetSpeed);
+
+/**
+ * Retrieves the min and max fan speed that the user can set for the GPU fan.
+ *
+ * For all cuda-capable discrete products with fans
+ *
+ * @param device                        The identifier of the target device
+ * @param minSpeed                      The minimum speed allowed to set
+ * @param maxSpeed                      The maximum speed allowed to set
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a minSpeed and \a maxSpeed have been set
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this
+ *                                             (doesn't have fans)
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMinMaxFanSpeed(nvmlDevice_t device, unsigned int * minSpeed,
+                                                 unsigned int * maxSpeed);
+
+/**
+ * Gets current fan control policy.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * For all cuda-capable discrete products with fans
+ *
+ * @param device                        The identifier of the target device
+ * @param fan                           The index of the target fan, zero indexed.
+ * @param policy                        Reference in which to return the fan control policy
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a policy has been populated
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a policy is null or the \a fan given doesn't reference
+ *                                             a fan that exists.
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the \a device is older than Maxwell
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetFanControlPolicy_v2(nvmlDevice_t device, unsigned int fan,
+                                                      nvmlFanControlPolicy_t *policy);
+
+/**
+ * Retrieves the number of fans on the device.
+ *
+ * For all discrete products with dedicated fans.
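+ *
+ * A minimal sketch enumerating per-fan speeds (illustrative only; assumes a
+ * valid \a device handle):
+ *
+ * \code
+ * unsigned int numFans = 0;
+ * if (nvmlDeviceGetNumFans(device, &numFans) == NVML_SUCCESS)
+ * {
+ *     for (unsigned int fan = 0; fan < numFans; fan++)
+ *     {
+ *         unsigned int speed = 0;
+ *         if (nvmlDeviceGetFanSpeed_v2(device, fan, &speed) == NVML_SUCCESS)
+ *         {
+ *             // speed is the intended fan speed, expressed as a percentage.
+ *         }
+ *     }
+ * }
+ * \endcode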
+ * + * @param device The identifier of the target device + * @param numFans The number of fans + * + * @return + * - \ref NVML_SUCCESS if \a fan number query was successful + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a numFans is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumFans(nvmlDevice_t device, unsigned int *numFans); + +/** + * Retrieves the current temperature readings for the device, in degrees C. + * + * For all products. + * + * See \ref nvmlTemperatureSensors_t for details on available temperature sensors. + * + * @param device The identifier of the target device + * @param sensorType Flag that indicates which sensor reading to retrieve + * @param temp Reference in which to return the temperature reading + * + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); + +/** + * Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C. + * + * For Kepler &tm; or newer fully supported devices. + * + * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * + * Note: This API is no longer the preferred interface for retrieving the following temperature thresholds + * on Ada and later architectures: NVML_TEMPERATURE_THRESHOLD_SHUTDOWN, NVML_TEMPERATURE_THRESHOLD_SLOWDOWN, + * NVML_TEMPERATURE_THRESHOLD_MEM_MAX and NVML_TEMPERATURE_THRESHOLD_GPU_MAX. + * + * Support for reading these temperature thresholds for Ada and later architectures would be removed from this + * API in future releases. Please use \ref nvmlDeviceGetFieldValues with NVML_FI_DEV_TEMPERATURE_* fields to retrieve + * temperature thresholds on these architectures. + * + * @param device The identifier of the target device + * @param thresholdType The type of threshold value queried + * @param temp Reference in which to return the temperature reading + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); + +/** + * Used to execute a list of thermal system instructions. 
+ * + * @param device The identifier of the target device + * @param sensorIndex The index of the thermal sensor + * @param pThermalSettings Reference in which to return the thermal sensor information + * + * @return + * - \ref NVML_SUCCESS if \a pThermalSettings has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pThermalSettings is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetThermalSettings(nvmlDevice_t device, unsigned int sensorIndex, nvmlGpuThermalSettings_t *pThermalSettings); + +/** + * Retrieves the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlPstates_t for details on allowed performance states. + * + * @param device The identifier of the target device + * @param pState Reference in which to return the performance state reading + * + * @return + * - \ref NVML_SUCCESS if \a pState has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t *pState); + +/** + * Retrieves current clocks event reasons. + * + * For all fully supported products. + * + * \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once. + * + * @param device The identifier of the target device + * @param clocksEventReasons Reference in which to return bitmask of active clocks event + * reasons + * + * @return + * - \ref NVML_SUCCESS if \a clocksEventReasons has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksEventReasons is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlClocksEventReasons + * @see nvmlDeviceGetSupportedClocksEventReasons + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksEventReasons(nvmlDevice_t device, unsigned long long *clocksEventReasons); + +/** + * @deprecated Use \ref nvmlDeviceGetCurrentClocksEventReasons instead + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice_t device, unsigned long long *clocksThrottleReasons); + +/** + * Retrieves bitmask of supported clocks event reasons that can be returned by + * \ref nvmlDeviceGetCurrentClocksEventReasons + * + * For all fully supported products. + * + * This method is not supported in virtual machines running virtual GPU (vGPU). 
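+ *
+ * A minimal sketch combining the supported and currently active reason bitmasks
+ * (illustrative only; assumes a valid \a device handle):
+ *
+ * \code
+ * unsigned long long supported = 0, current = 0;
+ * if (nvmlDeviceGetSupportedClocksEventReasons(device, &supported) == NVML_SUCCESS &&
+ *     nvmlDeviceGetCurrentClocksEventReasons(device, &current) == NVML_SUCCESS)
+ * {
+ *     // Any bit set in (current & supported) identifies an event reason that is
+ *     // both supported by this device and affecting clocks right now.
+ * }
+ * \endcode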
+ * + * @param device The identifier of the target device + * @param supportedClocksEventReasons Reference in which to return bitmask of supported + * clocks event reasons + * + * @return + * - \ref NVML_SUCCESS if \a supportedClocksEventReasons has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksEventReasons is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlClocksEventReasons + * @see nvmlDeviceGetCurrentClocksEventReasons + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksEventReasons(nvmlDevice_t device, unsigned long long *supportedClocksEventReasons); + +/** + * @deprecated Use \ref nvmlDeviceGetSupportedClocksEventReasons instead + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice_t device, unsigned long long *supportedClocksThrottleReasons); + +/** + * Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization. + * + * Retrieve the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlPstates_t for details on allowed performance states. + * + * @param device The identifier of the target device + * @param pState Reference in which to return the performance state reading + * + * @return + * - \ref NVML_SUCCESS if \a pState has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t *pState); + +/** + * Retrieve performance monitor samples from the associated subdevice. + * + * @param device + * @param pDynamicPstatesInfo + * + * @return + * - \ref NVML_SUCCESS if \a pDynamicPstatesInfo has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pDynamicPstatesInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDynamicPstatesInfo(nvmlDevice_t device, nvmlGpuDynamicPstatesInfo_t *pDynamicPstatesInfo); + +/** + * Retrieve the MemClk (Memory Clock) VF offset value. 
+ * @param[in]   device                         The identifier of the target device
+ * @param[out]  offset                         The retrieved MemClk VF offset value
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a offset has been successfully queried
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a offset is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMemClkVfOffset(nvmlDevice_t device, int *offset);
+
+/**
+ * Retrieve min and max clocks of some clock domain for a given PState
+ *
+ * @param device                               The identifier of the target device
+ * @param type                                 Clock domain
+ * @param pstate                               PState to query
+ * @param minClockMHz                          Reference in which to return min clock frequency
+ * @param maxClockMHz                          Reference in which to return max clock frequency
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if everything worked
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device, \a type or \a pstate are invalid or both
+ *                                                  \a minClockMHz and \a maxClockMHz are NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMinMaxClockOfPState(nvmlDevice_t device, nvmlClockType_t type, nvmlPstates_t pstate,
+                                                      unsigned int * minClockMHz, unsigned int * maxClockMHz);
+
+/**
+ * Get all supported Performance States (P-States) for the device.
+ *
+ * The returned array contains a contiguous list of valid P-States supported by
+ * the device. If the number of supported P-States is fewer than the size of the array
+ * supplied, the missing elements contain \a NVML_PSTATE_UNKNOWN.
+ *
+ * The number of elements in the returned list will never exceed \a NVML_MAX_GPU_PERF_PSTATES.
+ *
+ * @param device                               The identifier of the target device
+ * @param pstates                              Container to return the list of performance states
+ *                                             supported by device
+ * @param size                                 Size of the supplied \a pstates array in bytes
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a pstates array has been retrieved
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if the container supplied was not large enough to
+ *                                             hold the resulting list
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device or \a pstates is invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support performance state readings
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetSupportedPerformanceStates(nvmlDevice_t device,
+                                                             nvmlPstates_t *pstates, unsigned int size);
+
+/**
+ * Retrieve the GPCCLK min max VF offset value.
+ * @param[in]   device                         The identifier of the target device
+ * @param[out]  minOffset                      The retrieved GPCCLK VF min offset value
+ * @param[out]  maxOffset                      The retrieved GPCCLK VF max offset value
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a offset has been successfully queried
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a offset is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice_t device,
+                                                       int *minOffset, int *maxOffset);
+
+/**
+ * Retrieve the MemClk (Memory Clock) min max VF offset value.
+ * @param[in]   device                         The identifier of the target device
+ * @param[out]  minOffset                      The retrieved MemClk VF min offset value
+ * @param[out]  maxOffset                      The retrieved MemClk VF max offset value
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a offset has been successfully queried
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a offset is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice_t device,
+                                                       int *minOffset, int *maxOffset);
+
+/**
+ * This API has been deprecated.
+ *
+ * Retrieves the power management mode associated with this device.
+ *
+ * For products from the Fermi family.
+ *     - Requires \a NVML_INFOROM_POWER version 3.0 or higher.
+ *
+ * For products from the Kepler or newer families.
+ *     - Does not require \a NVML_INFOROM_POWER object.
+ *
+ * This flag indicates whether any power management algorithm is currently active on the device. An
+ * enabled state does not necessarily mean the device is being actively throttled -- only that
+ * the driver will do so if the appropriate conditions are met.
+ *
+ * See \ref nvmlEnableState_t for details on allowed modes.
+ *
+ * @param device                               The identifier of the target device
+ * @param mode                                 Reference in which to return the current power management mode
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a mode has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a mode is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
+ *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementMode(nvmlDevice_t device, nvmlEnableState_t *mode);
+
+/**
+ * Retrieves the power management limit associated with this device.
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ *
+ * The power limit defines the upper boundary for the card's power draw. If
+ * the card's total power draw reaches this limit, the power management algorithm kicks in.
+ *
+ * This reading is only available if power management mode is supported.
+ * See \ref nvmlDeviceGetPowerManagementMode.
+ * + * @param device The identifier of the target device + * @param limit Reference in which to return the power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int *limit); + +/** + * Retrieves information about possible values of power management limits on this device. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param minLimit Reference in which to return the minimum power management limit in milliwatts + * @param maxLimit Reference in which to return the maximum power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a minLimit and \a maxLimit have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minLimit or \a maxLimit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetPowerManagementLimit + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice_t device, unsigned int *minLimit, unsigned int *maxLimit); + +/** + * Retrieves default power management limit on this device, in milliwatts. + * Default power management limit is a power management limit that the device boots with. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param defaultLimit Reference in which to return the default power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a defaultLimit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t device, unsigned int *defaultLimit); + +/** + * Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory) + * + * For Fermi &tm; or newer fully supported devices. + * + * On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. On Ampere + * (except GA100) or newer GPUs, the API returns power averaged over 1 sec interval. On GA100 and + * older architectures, instantaneous power is returned. + * + * See \ref NVML_FI_DEV_POWER_AVERAGE and \ref NVML_FI_DEV_POWER_INSTANT to query specific power + * values. + * + * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode. 
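+ *
+ * A minimal query sketch (illustrative only; assumes a valid \a device handle):
+ *
+ * \code
+ * unsigned int powerMilliwatts = 0;
+ * if (nvmlDeviceGetPowerUsage(device, &powerMilliwatts) == NVML_SUCCESS)
+ * {
+ *     double watts = powerMilliwatts / 1000.0;  // the reading is reported in milliwatts
+ * }
+ * \endcode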
+ * + * @param device The identifier of the target device + * @param power Reference in which to return the power usage information + * + * @return + * - \ref NVML_SUCCESS if \a power has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power); + +/** + * Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded + * + * For Volta &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param energy Reference in which to return the energy consumption information + * + * @return + * - \ref NVML_SUCCESS if \a energy has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a energy is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support energy readings + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTotalEnergyConsumption(nvmlDevice_t device, unsigned long long *energy); + +/** + * Get the effective power limit that the driver enforces after taking into account all limiters + * + * Note: This can be different from the \ref nvmlDeviceGetPowerManagementLimit if other limits are set elsewhere + * This includes the out of band power limit interface + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The device to communicate with + * @param limit Reference in which to return the power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t device, unsigned int *limit); + +/** + * Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot). + * + * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. + * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. + * Not supported on Quadro ® and Tesla &tm; C-class products. 
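+ *
+ * An illustrative sketch (not from the original NVML documentation; assumes NVML is initialized
+ * and \a device is a valid handle):
+ * \code
+ * nvmlGpuOperationMode_t current, pending;
+ * // A pending mode that differs from the current one takes effect after the next reboot
+ * if (nvmlDeviceGetGpuOperationMode(device, &current, &pending) == NVML_SUCCESS &&
+ *     current != pending)
+ *     printf("a new GPU operation mode is pending a reboot\n");
+ * \endcode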
+ * + * @param device The identifier of the target device + * @param current Reference in which to return the current GOM + * @param pending Reference in which to return the pending GOM + * + * @return + * - \ref NVML_SUCCESS if \a mode has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a current or \a pending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlGpuOperationMode_t + * @see nvmlDeviceSetGpuOperationMode + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t *current, nvmlGpuOperationMode_t *pending); + +/** + * Retrieves the amount of used, free, reserved and total memory available on the device, in bytes. + * The reserved amount is supported on version 2 only. + * + * For all products. + * + * Enabling ECC reduces the amount of total available memory, due to the extra required parity bits. + * Under WDDM most device memory is allocated and managed on startup by Windows. + * + * Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated + * by all active channels on the device. + * + * See \ref nvmlMemory_v2_t for details on available memory info. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate + * information, only if the caller has appropriate privileges. Per-instance + * information can be queried by using specific MIG device handles. + * + * @note nvmlDeviceGetMemoryInfo_v2 adds additional memory information. + * + * @note On systems where GPUs are NUMA nodes, the accuracy of FB memory utilization + * provided by this API depends on the memory accounting of the operating system. + * This is because FB memory is managed by the operating system instead of the NVIDIA GPU driver. + * Typically, pages allocated from FB memory are not released even after + * the process terminates to enhance performance. In scenarios where + * the operating system is under memory pressure, it may resort to utilizing FB memory. + * Such actions can result in discrepancies in the accuracy of memory reporting. + * + * @param device The identifier of the target device + * @param memory Reference in which to return the memory information + * + * @return + * - \ref NVML_SUCCESS if \a memory has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory); +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo_v2(nvmlDevice_t device, nvmlMemory_v2_t *memory); + +/** + * Retrieves the current compute mode for the device. + * + * For all products. + * + * See \ref nvmlComputeMode_t for details on allowed compute modes. 
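+ *
+ * A minimal usage sketch (illustrative only, not from the original NVML documentation; assumes
+ * NVML is initialized and \a device is a valid handle):
+ * \code
+ * nvmlComputeMode_t mode;
+ * if (nvmlDeviceGetComputeMode(device, &mode) == NVML_SUCCESS &&
+ *     mode == NVML_COMPUTEMODE_EXCLUSIVE_PROCESS)
+ *     printf("only one process may hold a compute context on this device\n");
+ * \endcode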
+ * + * @param device The identifier of the target device + * @param mode Reference in which to return the current compute mode + * + * @return + * - \ref NVML_SUCCESS if \a mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetComputeMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode(nvmlDevice_t device, nvmlComputeMode_t *mode); + +/** + * Retrieves the CUDA compute capability of the device. + * + * For all products. + * + * Returns the major and minor compute capability version numbers of the + * device. The major and minor versions are equivalent to the + * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR and + * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR attributes that would be + * returned by CUDA's cuDeviceGetAttribute(). + * + * @param device The identifier of the target device + * @param major Reference in which to return the major CUDA compute capability + * @param minor Reference in which to return the minor CUDA compute capability + * + * @return + * - \ref NVML_SUCCESS if \a major and \a minor have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a major or \a minor are NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int *major, int *minor); + +/** + * Retrieves the current and pending ECC modes for the device. + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * + * Changing ECC modes requires a reboot. The "pending" ECC mode refers to the target mode following + * the next reboot. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param current Reference in which to return the current ECC mode + * @param pending Reference in which to return the pending ECC mode + * + * @return + * - \ref NVML_SUCCESS if \a current and \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetEccMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEccMode(nvmlDevice_t device, nvmlEnableState_t *current, nvmlEnableState_t *pending); + +/** + * Retrieves the default ECC modes for the device. + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * + * See \ref nvmlEnableState_t for details on allowed modes. 
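+ *
+ * A minimal usage sketch (illustrative only, not from the original NVML documentation; assumes
+ * NVML is initialized and \a device is a valid handle):
+ * \code
+ * nvmlEnableState_t defaultMode;
+ * if (nvmlDeviceGetDefaultEccMode(device, &defaultMode) == NVML_SUCCESS)
+ *     printf("factory default ECC mode: %s\n",
+ *            defaultMode == NVML_FEATURE_ENABLED ? "enabled" : "disabled");
+ * \endcode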
+ * + * @param device The identifier of the target device + * @param defaultMode Reference in which to return the default ECC mode + * + * @return + * - \ref NVML_SUCCESS if \a current and \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a default is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetEccMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDefaultEccMode(nvmlDevice_t device, nvmlEnableState_t *defaultMode); + +/** + * Retrieves the device boardId from 0-N. + * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with + * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. + * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across + * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and + * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will + * always return those values but they will always be different from each other). + * + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param boardId Reference in which to return the device's board ID + * + * @return + * - \ref NVML_SUCCESS if \a boardId has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBoardId(nvmlDevice_t device, unsigned int *boardId); + +/** + * Retrieves whether the device is on a Multi-GPU Board + * Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value. + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param multiGpuBool Reference in which to return a zero or non-zero value + * to indicate whether the device is on a multi GPU board + * + * @return + * - \ref NVML_SUCCESS if \a multiGpuBool has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard(nvmlDevice_t device, unsigned int *multiGpuBool); + +/** + * Retrieves the total ECC error counts for the device. + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * Requires ECC Mode to be enabled. + * + * The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of + * errors across the entire device. 
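+ *
+ * A minimal usage sketch (illustrative only, not from the original NVML documentation; assumes
+ * NVML is initialized, \a device is a valid handle, and ECC is enabled on the device):
+ * \code
+ * unsigned long long count = 0;
+ * // Volatile counts reset on driver reload; aggregate counts persist for the life of the device
+ * if (nvmlDeviceGetTotalEccErrors(device, NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
+ *                                 NVML_VOLATILE_ECC, &count) == NVML_SUCCESS)
+ *     printf("uncorrected volatile ECC errors: %llu\n", count);
+ * \endcode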
+ * + * See \ref nvmlMemoryErrorType_t for a description of available error types.\n + * See \ref nvmlEccCounterType_t for a description of available counter types. + * + * @param device The identifier of the target device + * @param errorType Flag that specifies the type of the errors. + * @param counterType Flag that specifies the counter-type of the errors. + * @param eccCounts Reference in which to return the specified ECC errors + * + * @return + * - \ref NVML_SUCCESS if \a eccCounts has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceClearEccErrorCounts() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTotalEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, unsigned long long *eccCounts); + +/** + * Retrieves the detailed ECC error counts for the device. + * + * @deprecated This API supports only a fixed set of ECC error locations + * On different GPU architectures different locations are supported + * See \ref nvmlDeviceGetMemoryErrorCounter + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based ECC counts. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other ECC counts. + * Requires ECC Mode to be enabled. + * + * Detailed errors provide separate ECC counts for specific parts of the memory system. + * + * Reports zero for unsupported ECC error counters when a subset of ECC error counters are supported. + * + * See \ref nvmlMemoryErrorType_t for a description of available bit types.\n + * See \ref nvmlEccCounterType_t for a description of available counter types.\n + * See \ref nvmlEccErrorCounts_t for a description of provided detailed ECC counts. + * + * @param device The identifier of the target device + * @param errorType Flag that specifies the type of the errors. + * @param counterType Flag that specifies the counter-type of the errors. + * @param eccCounts Reference in which to return the specified ECC errors + * + * @return + * - \ref NVML_SUCCESS if \a eccCounts has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceClearEccErrorCounts() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDetailedEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlEccErrorCounts_t *eccCounts); + +/** + * Retrieves the requested memory error counter for the device. + * + * For Fermi &tm; or newer fully supported devices. + * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts. 
+ * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
+ *
+ * Only applicable to devices with ECC.
+ *
+ * Requires ECC Mode to be enabled.
+ *
+ * @note On MIG-enabled GPUs, per instance information can be queried using specific
+ * MIG device handles. Per instance information is currently only supported for
+ * non-DRAM uncorrectable volatile errors. Querying volatile errors using device
+ * handles is currently not supported.
+ *
+ * See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
+ * See \ref nvmlEccCounterType_t for a description of available counter types.\n
+ * See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
+ *
+ * @param device The identifier of the target device
+ * @param errorType Flag that specifies the type of error.
+ * @param counterType Flag that specifies the counter-type of the errors.
+ * @param locationType Specifies the location of the counter.
+ * @param count Reference in which to return the ECC counter
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a count has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType, \a counterType or \a locationType is
+ * invalid, or \a count is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType,
+ nvmlEccCounterType_t counterType,
+ nvmlMemoryLocation_t locationType, unsigned long long *count);
+
+/**
+ * Retrieves the current utilization rates for the device's major subsystems.
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ *
+ * See \ref nvmlUtilization_t for details on available utilization rates.
+ *
+ * \note During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings.
+ * This is caused by the ECC Memory Scrubbing mechanism that is performed during driver initialization.
+ *
+ * @note On MIG-enabled GPUs, querying device utilization rates is not currently supported.
+ *
+ * @param device The identifier of the target device
+ * @param utilization Reference in which to return the utilization information
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a utilization has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a utilization is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization);
+
+/**
+ * Retrieves the current utilization and sampling size in microseconds for the Encoder
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @note On MIG-enabled GPUs, querying encoder utilization is not currently supported.
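+ *
+ * A minimal usage sketch (illustrative only, not from the original NVML documentation; assumes
+ * NVML is initialized and \a device is a valid handle):
+ * \code
+ * unsigned int util = 0, periodUs = 0;
+ * if (nvmlDeviceGetEncoderUtilization(device, &util, &periodUs) == NVML_SUCCESS)
+ *     printf("encoder: %u%% (sampled over %u us)\n", util, periodUs);
+ * \endcode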
+ * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for encoder utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + +/** + * Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param encoderQueryType Type of encoder to query + * @param encoderCapacity Reference to an unsigned int for the encoder capacity + * + * @return + * - \ref NVML_SUCCESS if \a encoderCapacity is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a encoderCapacity is NULL, or \a device or \a encoderQueryType + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if device does not support the encoder specified in \a encodeQueryType + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderCapacity (nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int *encoderCapacity); + +/** + * Retrieves the current encoder statistics for a given device. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param sessionCount Reference to an unsigned int for count of active encoder sessions + * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions + * @param averageLatency Reference to an unsigned int for encode latency in microseconds + * + * @return + * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, or \a device or \a averageFps, + * or \a averageLatency is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderStats (nvmlDevice_t device, unsigned int *sessionCount, + unsigned int *averageFps, unsigned int *averageLatency); + +/** + * Retrieves information about active encoder sessions on a target device. + * + * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The + * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions + * written to the buffer. 
+ * + * If the supplied buffer is not large enough to accommodate the active session array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. + * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return + * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param sessionCount Reference to caller supplied array size, and returns the number of sessions. + * @param sessionInfos Reference in which to return the session information + * + * @return + * - \ref NVML_SUCCESS if \a sessionInfos is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL. + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfos); + +/** + * Retrieves the current utilization and sampling size in microseconds for the Decoder + * + * For Kepler &tm; or newer fully supported devices. + * + * @note On MIG-enabled GPUs, querying decoder utilization is not currently supported. + * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for decoder utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + +/** + * Retrieves the current utilization and sampling size in microseconds for the JPG + * + * %TURING_OR_NEWER% + * + * @note On MIG-enabled GPUs, querying decoder utilization is not currently supported. 
+ * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for jpg utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetJpgUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + +/** + * Retrieves the current utilization and sampling size in microseconds for the OFA (Optical Flow Accelerator) + * + * %TURING_OR_NEWER% + * + * @note On MIG-enabled GPUs, querying decoder utilization is not currently supported. + * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for ofa utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetOfaUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + +/** +* Retrieves the active frame buffer capture sessions statistics for a given device. +* +* For Maxwell &tm; or newer fully supported devices. +* +* @param device The identifier of the target device +* @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats +* +* @return +* - \ref NVML_SUCCESS if \a fbcStats is fetched +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a fbcStats is NULL +* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetFBCStats(nvmlDevice_t device, nvmlFBCStats_t *fbcStats); + +/** +* Retrieves information about active frame buffer capture sessions on a target device. +* +* An array of active FBC sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The +* array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions +* written to the buffer. +* +* If the supplied buffer is not large enough to accommodate the active session array, the function returns +* NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount. +* To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return +* NVML_SUCCESS with number of active FBC sessions updated in *sessionCount. 
+* +* For Maxwell &tm; or newer fully supported devices. +* +* @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may +* be zero if there are no new frames captured since the session started. +* +* @param device The identifier of the target device +* @param sessionCount Reference to caller supplied array size, and returns the number of sessions. +* @param sessionInfo Reference in which to return the session information +* +* @return +* - \ref NVML_SUCCESS if \a sessionInfo is fetched +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL. +* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetFBCSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlFBCSessionInfo_t *sessionInfo); + +/** + * Retrieves the current and pending driver model for the device. + * + * For Fermi &tm; or newer fully supported devices. + * For windows only. + * + * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached + * to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached. + * + * See \ref nvmlDriverModel_t for details on available driver models. + * + * @param device The identifier of the target device + * @param current Reference in which to return the current driver model + * @param pending Reference in which to return the pending driver model + * + * @return + * - \ref NVML_SUCCESS if either \a current and/or \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or both \a current and \a pending are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetDriverModel() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); + +/** + * Get VBIOS version of the device. + * + * For all products. + * + * The VBIOS version may change from time to time. It will not exceed 32 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE. 
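+ *
+ * A minimal usage sketch (illustrative only, not from the original NVML documentation; assumes
+ * NVML is initialized and \a device is a valid handle):
+ * \code
+ * char vbios[NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE];
+ * if (nvmlDeviceGetVbiosVersion(device, vbios, sizeof(vbios)) == NVML_SUCCESS)
+ *     printf("VBIOS: %s\n", vbios);
+ * \endcode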
+ * + * @param device The identifier of the target device + * @param version Reference to which to return the VBIOS version + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char *version, unsigned int length); + +/** + * Get Bridge Chip Information for all the bridge chips on the board. + * + * For all fully supported products. + * Only applicable to multi-GPU products. + * + * @param device The identifier of the target device + * @param bridgeHierarchy Reference to the returned bridge chip Hierarchy + * + * @return + * - \ref NVML_SUCCESS if bridge chip exists + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a bridgeInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if bridge chip not supported on the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t *bridgeHierarchy); + +/** + * Get information about processes with a compute context on a device + * + * For Fermi &tm; or newer fully supported devices. + * + * This function returns information only about compute running processes (e.g. CUDA application which have + * active context). Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by this function. + * + * To query the current number of running compute processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new compute processes are spawned. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. 
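+ *
+ * A minimal sketch of the two-call sizing pattern described above (illustrative only, not from
+ * the original NVML documentation; assumes NVML is initialized and \a device is a valid handle;
+ * error handling trimmed for brevity):
+ * \code
+ * unsigned int count = 0;
+ * nvmlReturn_t ret = nvmlDeviceGetComputeRunningProcesses_v3(device, &count, NULL);
+ * if (ret == NVML_ERROR_INSUFFICIENT_SIZE && count > 0) {
+ *     count += 8; // headroom in case new processes start between the two calls
+ *     nvmlProcessInfo_t *infos = malloc(count * sizeof(*infos));
+ *     if (infos && nvmlDeviceGetComputeRunningProcesses_v3(device, &count, infos) == NVML_SUCCESS)
+ *         for (unsigned int i = 0; i < count; i++)
+ *             printf("pid %u uses %llu bytes\n", infos[i].pid, infos[i].usedGpuMemory);
+ *     free(infos);
+ * }
+ * \endcode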
+ * + * @param device The device handle or MIG device handle + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Get information about processes with a graphics context on a device + * + * For Kepler &tm; or newer fully supported devices. + * + * This function returns information only about graphics based processes + * (eg. applications using OpenGL, DirectX) + * + * To query the current number of running graphics processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new graphics processes are spawned. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. 
+ * + * @param device The device handle or MIG device handle + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Get information about processes with a MPS compute context on a device + * + * For Volta &tm; or newer fully supported devices. + * + * This function returns information only about compute running processes (e.g. CUDA application which have + * active context) utilizing MPS. Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by + * this function. + * + * To query the current number of running compute processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new compute processes are spawned. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. 
+ * + * @param device The device handle or MIG device handle + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Get information about running processes on a device for input context + * + * %HOPPER_OR_NEWER% + * + * This function returns information only about running processes (e.g. CUDA application which have + * active context). + * + * To determine the size of the @ref plist->procArray array to allocate, call the function with + * @ref plist->numProcArrayEntries set to zero and @ref plist->procArray set to NULL. The return + * code will be either NVML_ERROR_INSUFFICIENT_SIZE (if there are valid processes of type + * @ref plist->mode to report on, in which case the @ref plist->numProcArrayEntries field will + * indicate the required number of entries in the array) or NVML_SUCCESS (if no processes of type + * @ref plist->mode exist). + * + * The usedGpuMemory field returned is all of the memory used by the application. + * The usedGpuCcProtectedMemory field returned is all of the protected memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a plist->procArray table in case new processes are spawned. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in + * vGPU Host virtualization mode. + * Protected memory usage is currently not available in MIG mode and in windows. 
+ * + * @param device The device handle or MIG device handle + * @param plist Reference in which to process detail list + * @param plist->version The api version + * @param plist->mode The process mode + * @param plist->procArray Reference in which to return the process information + * @param plist->numProcArrayEntries Proc array size of returned entries + * + * @return + * - \ref NVML_SUCCESS if \a plist->numprocArrayEntries and \a plist->procArray have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a plist->numprocArrayEntries indicates that the \a plist->procArray is too small + * \a plist->numprocArrayEntries will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a plist is NULL, \a plist->version is invalid, + * \a plist->mode is invalid, + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRunningProcessDetailList(nvmlDevice_t device, nvmlProcessDetailList_t *plist); + +/** + * Check if the GPU devices are on the same physical board. + * + * For all fully supported products. + * + * @param device1 The first GPU device + * @param device2 The second GPU device + * @param onSameBoard Reference in which to return the status. + * Non-zero indicates that the GPUs are on the same board. + * + * @return + * - \ref NVML_SUCCESS if \a onSameBoard has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a dev1 or \a dev2 are invalid or \a onSameBoard is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the either GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int *onSameBoard); + +/** + * Retrieves the root/admin permissions on the target API. See \a nvmlRestrictedAPI_t for the list of supported APIs. + * If an API is restricted only root users can call that API. See \a nvmlDeviceSetAPIRestriction to change current permissions. + * + * For all fully supported products. + * + * @param device The identifier of the target device + * @param apiType Target API type for this operation + * @param isRestricted Reference in which to return the current restriction + * NVML_FEATURE_ENABLED indicates that the API is root-only + * NVML_FEATURE_DISABLED indicates that the API is accessible to all users + * + * @return + * - \ref NVML_SUCCESS if \a isRestricted has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a apiType incorrect or \a isRestricted is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device or the device does not support + * the feature that is being queried (E.G. 
Enabling/disabling Auto Boosted clocks is
+ * not supported by the device)
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * @see nvmlRestrictedAPI_t
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t *isRestricted);
+
+/**
+ * Gets recent samples for the GPU.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * Based on type, this method can be used to fetch the power, utilization or clock samples maintained in the buffer by
+ * the driver.
+ *
+ * Power, Utilization and Clock samples are returned as type "unsigned int" for the union nvmlValue_t.
+ *
+ * To get the size of samples that the user needs to allocate, the method is invoked with samples set to NULL.
+ * The returned samplesCount will provide the number of samples that can be queried. The user needs to
+ * allocate the buffer with size as samplesCount * sizeof(nvmlSample_t).
+ *
+ * lastSeenTimeStamp represents CPU timestamp in microseconds. Set it to 0 to fetch all the samples maintained by the
+ * underlying buffer. Set lastSeenTimeStamp to one of the timeStamps retrieved from the data of the previous query
+ * to get more recent samples.
+ *
+ * This method fetches the number of entries which can be accommodated in the provided samples array, and the
+ * reference samplesCount is updated to indicate how many samples were actually retrieved. The advantage of using this
+ * method for samples in contrast to polling via existing methods is to get higher frequency data at lower polling cost.
+ *
+ * @note On MIG-enabled GPUs, querying the following sample types, NVML_GPU_UTILIZATION_SAMPLES, NVML_MEMORY_UTILIZATION_SAMPLES
+ * NVML_ENC_UTILIZATION_SAMPLES and NVML_DEC_UTILIZATION_SAMPLES, is not currently supported.
+ *
+ * @param device The identifier for the target device
+ * @param type Type of sampling event
+ * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
+ * @param sampleValType Output parameter to represent the type of sample value as described in nvmlSampleVal_t
+ * @param sampleCount Reference to provide the number of elements which can be queried in samples array
+ * @param samples Reference in which samples are returned
+
+ * @return
+ * - \ref NVML_SUCCESS if samples are successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sampleCount is NULL or
+ * reference to \a sampleCount is 0 for non null \a samples
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp,
+ nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples);
+
+/**
+ * Gets Total, Available and Used size of BAR1 memory.
+ *
+ * BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party
+ * devices (peer-to-peer on the PCIE bus).
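+ *
+ * A minimal usage sketch (illustrative only, not from the original NVML documentation; assumes
+ * NVML is initialized and \a device is a valid handle; field names follow \ref nvmlBAR1Memory_t):
+ * \code
+ * nvmlBAR1Memory_t bar1;
+ * if (nvmlDeviceGetBAR1MemoryInfo(device, &bar1) == NVML_SUCCESS)
+ *     printf("BAR1 used %llu of %llu bytes\n", bar1.bar1Used, bar1.bar1Total);
+ * \endcode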
+ * + * @note In MIG mode, if device handle is provided, the API returns aggregate + * information, only if the caller has appropriate privileges. Per-instance + * information can be queried by using specific MIG device handles. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param bar1Memory Reference in which BAR1 memory + * information is returned. + * + * @return + * - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory); + +/** + * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power + * or thermal constraints. + * + * The method is important to users who are tying to understand if their GPUs throttle at any point during their applications. The + * difference in violation times at two different reference times gives the indication of GPU throttling event. + * + * Violation for thermal capping is not supported at this time. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param perfPolicyType Represents Performance policy which can trigger GPU throttling + * @param violTime Reference to which violation time related information is returned + * + * + * @return + * - \ref NVML_SUCCESS if violation time is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime); + +/** + * Gets the device's interrupt number + * + * @param device The identifier of the target device + * @param irqNum The interrupt number associated with the specified device + * + * @return + * - \ref NVML_SUCCESS if irq number is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a irqNum is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetIrqNum(nvmlDevice_t device, unsigned int *irqNum); + +/** + * Gets the device's core count + * + * @param device The identifier of the target device + * @param numCores The number of cores for the specified device + * + * @return + * - \ref NVML_SUCCESS if Gpu core count is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a numCores is NULL 
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumGpuCores(nvmlDevice_t device, unsigned int *numCores); + +/** + * Gets the devices power source + * + * @param device The identifier of the target device + * @param powerSource The power source of the device + * + * @return + * - \ref NVML_SUCCESS if the current power source was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a powerSource is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerSource(nvmlDevice_t device, nvmlPowerSource_t *powerSource); + +/** + * Gets the device's memory bus width + * + * @param device The identifier of the target device + * @param busWidth The devices's memory bus width + * + * @return + * - \ref NVML_SUCCESS if the memory bus width is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a busWidth is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryBusWidth(nvmlDevice_t device, unsigned int *busWidth); + +/** + * Gets the device's PCIE Max Link speed in MBPS + * + * @param device The identifier of the target device + * @param maxSpeed The devices's PCIE Max Link speed in MBPS + * + * @return + * - \ref NVML_SUCCESS if Pcie Max Link Speed is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a maxSpeed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t device, unsigned int *maxSpeed); + +/** + * Gets the device's PCIe Link speed in Mbps + * + * @param device The identifier of the target device + * @param pcieSpeed The devices's PCIe Max Link speed in Mbps + * + * @return + * - \ref NVML_SUCCESS if \a pcieSpeed has been retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pcieSpeed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support PCIe speed getting + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieSpeed(nvmlDevice_t device, unsigned int *pcieSpeed); + +/** + * Gets the device's Adaptive Clock status + * + * @param device The identifier of the target device + * @param adaptiveClockStatus The current adaptive clocking status, either + * @ref NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED + * or @ref NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED + * + * @return + * - \ref NVML_SUCCESS if the current adaptive clocking status is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if 
the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a adaptiveClockStatus is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t device, unsigned int *adaptiveClockStatus); + +/** + * Get the type of the GPU Bus (PCIe, PCI, ...) + * + * @param device The identifier of the target device + * @param type The PCI Bus type + * + * return + * - \ref NVML_SUCCESS if the bus \a type is successfully retreived + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \device is invalid or \type is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBusType(nvmlDevice_t device, nvmlBusType_t *type); + + + /** + * Deprecated: Will be deprecated in a future release. Use \ref nvmlDeviceGetGpuFabricInfoV instead + * + * Get fabric information associated with the device. + * + * %HOPPER_OR_NEWER% + * + * On Hopper + NVSwitch systems, GPU is registered with the NVIDIA Fabric Manager + * Upon successful registration, the GPU is added to the NVLink fabric to enable + * peer-to-peer communication. + * This API reports the current state of the GPU in the NVLink fabric + * along with other useful information. + * + * + * @param device The identifier of the target device + * @param gpuFabricInfo Information about GPU fabric state + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuFabricInfo(nvmlDevice_t device, nvmlGpuFabricInfo_t *gpuFabricInfo); + +/** +* Versioned wrapper around \ref nvmlDeviceGetGpuFabricInfo that accepts a versioned +* \ref nvmlGpuFabricInfo_v2_t or later output structure. +* +* @note The caller must set the \ref nvmlGpuFabricInfoV_t.version field to the +* appropriate version prior to calling this function. For example: +* \code +* nvmlGpuFabricInfoV_t fabricInfo = +* { .version = nvmlGpuFabricInfo_v2 }; +* nvmlReturn_t result = nvmlDeviceGetGpuFabricInfoV(device,&fabricInfo); +* \endcode +* +* %HOPPER_OR_NEWER% +* +* @param device The identifier of the target device +* @param gpuFabricInfo Information about GPU fabric state +* +* @return +* - \ref NVML_SUCCESS Upon success +* - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuFabricInfoV(nvmlDevice_t device, + nvmlGpuFabricInfoV_t *gpuFabricInfo); + +/** + * Get Conf Computing System capabilities. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param capabilities System CC capabilities + * + * @return + * - \ref NVML_SUCCESS if \a capabilities were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a capabilities is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeCapabilities(nvmlConfComputeSystemCaps_t *capabilities); + +/** + * Get Conf Computing System State. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. 
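+ *
+ * A minimal illustrative sketch (assumes nvmlInit_v2() has already succeeded; error
+ * handling abbreviated):
+ * \code
+ * nvmlConfComputeSystemState_t ccState;
+ * if (nvmlSystemGetConfComputeState(&ccState) == NVML_SUCCESS)
+ * {
+ *     // Inspect ccState to determine the system's Confidential Computing state.
+ * }
+ * \endcode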
+ * + * @param state System CC State + * + * @return + * - \ref NVML_SUCCESS if \a state were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a state is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeState(nvmlConfComputeSystemState_t *state); + +/** + * Get Conf Computing Protected and Unprotected Memory Sizes. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param device Device handle + * @param memInfo Protected/Unprotected Memory sizes + * + * @return + * - \ref NVML_SUCCESS if \a memInfo were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a memInfo or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlDeviceGetConfComputeMemSizeInfo(nvmlDevice_t device, nvmlConfComputeMemSizeInfo_t *memInfo); + +/** + * Get Conf Computing GPUs ready state. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param isAcceptingWork Returns GPU current work accepting state, + * NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or + * NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE + * + * return + * - \ref NVML_SUCCESS if \a current GPUs ready state were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a isAcceptingWork is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeGpusReadyState(unsigned int *isAcceptingWork); + +/** + * Get Conf Computing protected memory usage. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param device The identifier of the target device + * @param memory Reference in which to return the memory information + * + * @return + * - \ref NVML_SUCCESS if \a memory has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetConfComputeProtectedMemoryUsage(nvmlDevice_t device, nvmlMemory_t *memory); + +/** + * Get Conf Computing Gpu certificate details. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param device The identifier of the target device + * @param gpuCert Reference in which to return the gpu certificate information + * + * @return + * - \ref NVML_SUCCESS if \a gpu certificate info has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetConfComputeGpuCertificate(nvmlDevice_t device, + nvmlConfComputeGpuCertificate_t *gpuCert); + +/** + * Get Conf Computing Gpu attestation report. 
+ * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param device The identifier of the target device + * @param gpuAtstReport Reference in which to return the gpu attestation report + * + * @return + * - \ref NVML_SUCCESS if \a gpu attestation report has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetConfComputeGpuAttestationReport(nvmlDevice_t device, + nvmlConfComputeGpuAttestationReport_t *gpuAtstReport); +/** + * Get Conf Computing key rotation threshold detail. + * + * %HOPPER_OR_NEWER% + * Supported on Linux, Windows TCC. + * + * @param pKeyRotationThrInfo Reference in which to return the key rotation threshold data + * + * @return + * - \ref NVML_SUCCESS if \a gpu key rotation threshold info has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeKeyRotationThresholdInfo( + nvmlConfComputeGetKeyRotationThresholdInfo_t *pKeyRotationThrInfo); + +/** + * Get Conf Computing System Settings. + * + * %HOPPER_OR_NEWER% + * Supported on Linux, Windows TCC. + * + * @param settings System CC settings + * + * @return + * - \ref NVML_SUCCESS if the query is success + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counters is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the provided version is invalid/unsupported + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeSettings(nvmlSystemConfComputeSettings_t *settings); + +/** + * Retrieve GSP firmware version. + * + * The caller passes in buffer via \a version and corresponding GSP firmware numbered version + * is returned with the same parameter in string format. + * + * @param device Device handle + * @param version The retrieved GSP firmware version + * + * @return + * - \ref NVML_SUCCESS if GSP firmware version is sucessfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or GSP \a version pointer is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if GSP firmware is not enabled for GPU + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGspFirmwareVersion(nvmlDevice_t device, char *version); + +/** + * Retrieve GSP firmware mode. + * + * The caller passes in integer pointers. GSP firmware enablement and default mode information is returned with + * corresponding parameters. The return value in \a isEnabled and \a defaultMode should be treated as boolean. 
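+ *
+ * A minimal illustrative sketch (assumes \a device is a valid handle, e.g. obtained from
+ * nvmlDeviceGetHandleByIndex_v2(); error handling abbreviated):
+ * \code
+ * unsigned int isEnabled = 0, defaultMode = 0;
+ * if (nvmlDeviceGetGspFirmwareMode(device, &isEnabled, &defaultMode) == NVML_SUCCESS)
+ * {
+ *     // Treat isEnabled and defaultMode as booleans, as described above.
+ * }
+ * \endcode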
+ * + * @param device Device handle + * @param isEnabled Pointer to specify if GSP firmware is enabled + * @param defaultMode Pointer to specify if GSP firmware is supported by default on \a device + * + * @return + * - \ref NVML_SUCCESS if GSP firmware mode is sucessfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or any of \a isEnabled or \a defaultMode is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGspFirmwareMode(nvmlDevice_t device, unsigned int *isEnabled, unsigned int *defaultMode); + +/** + * @} + */ + +/** @addtogroup nvmlAccountingStats + * @{ + */ + +/** + * Queries the state of per process accounting mode. + * + * For Kepler &tm; or newer fully supported devices. + * + * See \ref nvmlDeviceGetAccountingStats for more details. + * See \ref nvmlDeviceSetAccountingMode + * + * @param device The identifier of the target device + * @param mode Reference in which to return the current accounting mode + * + * @return + * - \ref NVML_SUCCESS if the mode has been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t *mode); + +/** + * Queries process's accounting stats. + * + * For Kepler &tm; or newer fully supported devices. + * + * Accounting stats capture GPU utilization and other statistics across the lifetime of a process. + * Accounting stats can be queried during life time of the process and after its termination. + * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and + * updated to actual running time after its termination. + * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old + * processes. + * + * See \ref nvmlAccountingStats_t for description of each returned metric. + * List of processes that can be queried can be retrieved from \ref nvmlDeviceGetAccountingPids. + * + * @note Accounting Mode needs to be on. See \ref nvmlDeviceGetAccountingMode. + * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be + * queried since they don't contribute to GPU utilization. + * @note In case of pid collision stats of only the latest process (that terminated last) will be reported + * + * @warning On Kepler devices per process statistics are accurate only if there's one process running on a GPU. + * + * @param device The identifier of the target device + * @param pid Process Id of the target process to query stats for + * @param stats Reference in which to return the process's accounting stats + * + * @return + * - \ref NVML_SUCCESS if stats have been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a stats are NULL + * - \ref NVML_ERROR_NOT_FOUND if process stats were not found + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature or accounting mode is disabled + * or on vGPU host. 
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingBufferSize + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t *stats); + +/** + * Queries list of processes that can be queried for accounting stats. The list of processes returned + * can be in running or terminated state. + * + * For Kepler &tm; or newer fully supported devices. + * + * To just query the number of processes ready to be queried, call this function with *count = 0 and + * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. + * + * For more details see \ref nvmlDeviceGetAccountingStats. + * + * @note In case of PID collision some processes might not be accessible before the circular buffer is full. + * + * @param device The identifier of the target device + * @param count Reference in which to provide the \a pids array size, and + * to return the number of elements ready to be queried + * @param pids Reference in which to return list of process ids + * + * @return + * - \ref NVML_SUCCESS if pids were successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature or accounting mode is disabled + * or on vGPU host. + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to + * expected value) + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingBufferSize + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int *count, unsigned int *pids); + +/** + * Returns the number of processes that the circular buffer with accounting pids can hold. + * + * For Kepler &tm; or newer fully supported devices. + * + * This is the maximum number of processes that accounting information will be stored for before information + * about oldest processes will get overwritten by information about new processes. + * + * @param device The identifier of the target device + * @param bufferSize Reference in which to provide the size (in number of elements) + * of the circular buffer for accounting stats. + * + * @return + * - \ref NVML_SUCCESS if buffer size was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a bufferSize is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingStats + * @see nvmlDeviceGetAccountingPids + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int *bufferSize); + +/** @} */ + +/** @addtogroup nvmlDeviceQueries + * @{ + */ + +/** + * Returns the list of retired pages by source, including pages that are pending retirement + * The address information provided from this API is the hardware address of the page that was retired. Note + * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 + * + * For Kepler &tm; or newer fully supported devices. 
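+ *
+ * An illustrative sketch of the size-query pattern described below
+ * (NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR is one of the \ref nvmlPageRetirementCause_t
+ * values; includes and error handling abbreviated):
+ * \code
+ * unsigned int pageCount = 0;
+ * // First call with pageCount == 0 to learn how many retired pages match the cause.
+ * nvmlDeviceGetRetiredPages(device, NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR, &pageCount, NULL);
+ * unsigned long long *addresses = malloc(pageCount * sizeof(*addresses));
+ * // Second call fills the caller-allocated buffer with the hardware page addresses.
+ * nvmlDeviceGetRetiredPages(device, NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR, &pageCount, addresses);
+ * free(addresses);
+ * \endcode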
+ * + * @param device The identifier of the target device + * @param cause Filter page addresses by cause of retirement + * @param pageCount Reference in which to provide the \a addresses buffer size, and + * to return the number of retired pages that match \a cause + * Set to 0 to query the size without allocating an \a addresses buffer + * @param addresses Buffer to write the page addresses into + * + * @return + * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the + * matching page addresses. \a pageCount is set to the needed size. + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or + * \a addresses is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause, + unsigned int *pageCount, unsigned long long *addresses); + +/** + * Returns the list of retired pages by source, including pages that are pending retirement + * The address information provided from this API is the hardware address of the page that was retired. Note + * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 + * + * \note nvmlDeviceGetRetiredPages_v2 adds an additional timestamps parameter to return the time of each page's + * retirement. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param cause Filter page addresses by cause of retirement + * @param pageCount Reference in which to provide the \a addresses buffer size, and + * to return the number of retired pages that match \a cause + * Set to 0 to query the size without allocating an \a addresses buffer + * @param addresses Buffer to write the page addresses into + * @param timestamps Buffer to write the timestamps of page retirement, additional for _v2 + * + * @return + * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the + * matching page addresses. \a pageCount is set to the needed size. + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or + * \a addresses is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages_v2(nvmlDevice_t device, nvmlPageRetirementCause_t cause, + unsigned int *pageCount, unsigned long long *addresses, unsigned long long *timestamps); + +/** + * Check if any pages are pending retirement and need a reboot to fully retire. + * + * For Kepler &tm; or newer fully supported devices. 
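+ *
+ * A minimal illustrative sketch (error handling abbreviated):
+ * \code
+ * nvmlEnableState_t isPending = NVML_FEATURE_DISABLED;
+ * if (nvmlDeviceGetRetiredPagesPendingStatus(device, &isPending) == NVML_SUCCESS &&
+ *     isPending == NVML_FEATURE_ENABLED)
+ * {
+ *     // A reboot is required to fully retire the pending pages.
+ * }
+ * \endcode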
+ * + * @param device The identifier of the target device + * @param isPending Reference in which to return the pending status + * + * @return + * - \ref NVML_SUCCESS if \a isPending was populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t *isPending); + +/** + * Get number of remapped rows. The number of rows reported will be based on + * the cause of the remapping. isPending indicates whether or not there are + * pending remappings. A reset will be required to actually remap the row. + * failureOccurred will be set if a row remapping ever failed in the past. A + * pending remapping won't affect future work on the GPU since + * error-containment and dynamic page blacklisting will take care of that. + * + * @note On MIG-enabled GPUs with active instances, querying the number of + * remapped rows is not supported + * + * For Ampere &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param corrRows Reference for number of rows remapped due to correctable errors + * @param uncRows Reference for number of rows remapped due to uncorrectable errors + * @param isPending Reference for whether or not remappings are pending + * @param failureOccurred Reference that is set when a remapping has failed in the past + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a corrRows, \a uncRows, \a isPending or \a failureOccurred is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN Unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRemappedRows(nvmlDevice_t device, unsigned int *corrRows, unsigned int *uncRows, + unsigned int *isPending, unsigned int *failureOccurred); + +/** + * Get the row remapper histogram. Returns the remap availability for each bank + * on the GPU. + * + * @param device Device handle + * @param values Histogram values + * + * @return + * - \ref NVML_SUCCESS On success + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRowRemapperHistogram(nvmlDevice_t device, nvmlRowRemapperHistogramValues_t *values); + +/** + * Get architecture for device + * + * @param device The identifier of the target device + * @param arch Reference where architecture is returned, if call successful. + * Set to NVML_DEVICE_ARCH_* upon success + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a arch (output refererence) are invalid + */ +nvmlReturn_t DECLDIR nvmlDeviceGetArchitecture(nvmlDevice_t device, nvmlDeviceArchitecture_t *arch); + +/** + * Retrieves the frequency monitor fault status for the device. + * + * For Ampere &tm; or newer fully supported devices. + * Requires root user. + * + * See \ref nvmlClkMonStatus_t for details on decoding the status output. 
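+ *
+ * A minimal illustrative sketch (must be run as root, as noted above; error handling abbreviated):
+ * \code
+ * nvmlClkMonStatus_t clkMonStatus;
+ * if (nvmlDeviceGetClkMonStatus(device, &clkMonStatus) == NVML_SUCCESS)
+ * {
+ *     // Decode clkMonStatus as described by nvmlClkMonStatus_t.
+ * }
+ * \endcode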
+ * + * @param device The identifier of the target device + * @param status Reference in which to return the clkmon fault status + * + * @return + * - \ref NVML_SUCCESS if \a status has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetClkMonStatus() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClkMonStatus(nvmlDevice_t device, nvmlClkMonStatus_t *status); + +/** + * Retrieves the current utilization and process ID + * + * For Maxwell &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running. + * Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at + * by \a utilization. One utilization sample structure is returned per process running, that had some non-zero utilization + * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values + * are returned as "unsigned int" values. If no valid sample entries are found since the lastSeenTimeStamp, NVML_ERROR_NOT_FOUND + * is returned. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilization set to NULL. The caller should allocate a buffer of size + * processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed + * in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for. + * + * On successful return, the function updates \a processSamplesCount with the number of process utilization sample + * structures that were actually written. This may differ from a previously read value as instances are created or + * destroyed. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @note On MIG-enabled GPUs, querying process utilization is not currently supported. + * + * @param device The identifier of the target device + * @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned + * @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
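+ *
+ * An illustrative sketch of the two-pass pattern described above (includes and error
+ * handling abbreviated):
+ * \code
+ * unsigned int processSamplesCount = 0;
+ * // First call with utilization == NULL to learn the required buffer size.
+ * nvmlDeviceGetProcessUtilization(device, NULL, &processSamplesCount, 0);
+ * nvmlProcessUtilizationSample_t *samples = malloc(processSamplesCount * sizeof(*samples));
+ * // Second call fills the caller-supplied buffer; lastSeenTimeStamp == 0 reads all buffered samples.
+ * nvmlDeviceGetProcessUtilization(device, samples, &processSamplesCount, 0);
+ * free(samples);
+ * \endcode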
+ + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, + unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); + +/** + * Retrieves the recent utilization and process ID for all running processes + * + * For Maxwell &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder, jpeg decoder, OFA (Optical Flow Accelerator) + * for all running processes. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at + * by \a procesesUtilInfo->procUtilArray. One utilization sample structure is returned per process running, that had some non-zero utilization + * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values + * are returned as "unsigned int" values. + * + * The caller should allocate a buffer of size processSamplesCount * sizeof(nvmlProcessUtilizationInfo_t). If the buffer is too small, the API will + * return \a NVML_ERROR_INSUFFICIENT_SIZE, with the recommended minimal buffer size at \a procesesUtilInfo->processSamplesCount. The caller should + * invoke the function again with the allocated buffer passed in \a procesesUtilInfo->procUtilArray, and \a procesesUtilInfo->processSamplesCount + * set to the number no less than the recommended value by the previous API return. + * + * On successful return, the function updates \a procesesUtilInfo->processSamplesCount with the number of process utilization info structures + * that were actually written. This may differ from a previously read value as instances are created or destroyed. + * + * \a procesesUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set \a procesesUtilInfo->lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * \a procesesUtilInfo->version is the version number of the structure nvmlProcessesUtilizationInfo_t, the caller should set the correct version + * number to retrieve the specific version of processes utilization information. + * + * @note On MIG-enabled GPUs, querying process utilization is not currently supported. + * + * @param device The identifier of the target device + * @param procesesUtilInfo Pointer to the caller-provided structure of nvmlProcessesUtilizationInfo_t. 
+ + * @return + * - \ref NVML_SUCCESS if \a procesesUtilInfo->procUtilArray has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a procesesUtilInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a procesesUtilInfo is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a procesesUtilInfo->procUtilArray is NULL, or the buffer size of procesesUtilInfo->procUtilArray is too small. + * The caller should check the minimul array size from the returned procesesUtilInfo->processSamplesCount, and call + * the function again with a buffer no smaller than procesesUtilInfo->processSamplesCount * sizeof(nvmlProcessUtilizationInfo_t) + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetProcessesUtilizationInfo(nvmlDevice_t device, nvmlProcessesUtilizationInfo_t *procesesUtilInfo); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlUnitCommands Unit Commands + * This chapter describes NVML operations that change the state of the unit. For S-class products. + * Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION + * error code when invoking any of these methods. + * @{ + */ +/***************************************************************************************************/ + +/** + * Set the LED state for the unit. The LED can be either green (0) or amber (1). + * + * For S-class products. + * Requires root/admin permissions. + * + * This operation takes effect immediately. + * + * + * Current S-Class products don't provide unique LEDs for each unit. As such, both front + * and back LEDs will be toggled in unison regardless of which unit is specified with this command. + * + * See \ref nvmlLedColor_t for available colors. + * + * @param unit The identifier of the target unit + * @param color The target LED color + * + * @return + * - \ref NVML_SUCCESS if the LED color has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a color is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlUnitGetLedState() + */ +nvmlReturn_t DECLDIR nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceCommands Device Commands + * This chapter describes NVML operations that change the state of the device. + * Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION + * error code when invoking any of these methods. + * @{ + */ +/***************************************************************************************************/ + +/** + * Set the persistence mode for the device. + * + * For all products. + * For Linux only. + * Requires root/admin permissions. 
+ * + * The persistence mode determines whether the GPU driver software is torn down after the last client + * exits. + * + * This operation takes effect immediately. It is not persistent across reboots. After each reboot the + * persistence mode is reset to "Disabled". + * + * See \ref nvmlEnableState_t for available modes. + * + * After calling this API with mode set to NVML_FEATURE_DISABLED on a device that has its own NUMA + * memory, the given device handle will no longer be valid, and to continue to interact with this + * device, a new handle should be obtained from one of the nvmlDeviceGetHandleBy*() APIs. This + * limitation is currently only applicable to devices that have a coherent NVLink connection to + * system memory. + * + * @param device The identifier of the target device + * @param mode The target persistence mode + * + * @return + * - \ref NVML_SUCCESS if the persistence mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetPersistenceMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode); + +/** + * Set the compute mode for the device. + * + * For all products. + * Requires root/admin permissions. + * + * The compute mode determines whether a GPU can be used for compute operations and whether it can + * be shared across contexts. + * + * This operation takes effect immediately. Under Linux it is not persistent across reboots and + * always resets to "Default". Under windows it is persistent. + * + * Under windows compute mode may only be set to DEFAULT when running in WDDM + * + * @note On MIG-enabled GPUs, compute mode would be set to DEFAULT and changing it is not supported. + * + * See \ref nvmlComputeMode_t for details on available compute modes. + * + * @param device The identifier of the target device + * @param mode The target compute mode + * + * @return + * - \ref NVML_SUCCESS if the compute mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetComputeMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode); + +/** + * Set the ECC mode for the device. + * + * For Kepler &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * Requires root/admin permissions. + * + * The ECC mode determines whether the GPU enables its ECC support. + * + * This operation takes effect after the next reboot. + * + * See \ref nvmlEnableState_t for details on available modes. 
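+ *
+ * A minimal illustrative sketch (requires root/admin; the new mode takes effect after the
+ * next reboot, as noted above):
+ * \code
+ * nvmlReturn_t ret = nvmlDeviceSetEccMode(device, NVML_FEATURE_ENABLED);
+ * \endcode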
+ * + * @param device The identifier of the target device + * @param ecc The target ECC mode + * + * @return + * - \ref NVML_SUCCESS if the ECC mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetEccMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc); + +/** + * Clear the ECC error and other memory error counts for the device. + * + * For Kepler &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts. + * Requires root/admin permissions. + * Requires ECC Mode to be enabled. + * + * Sets all of the specified ECC counters to 0, including both detailed and total counts. + * + * This operation takes effect immediately. + * + * See \ref nvmlMemoryErrorType_t for details on available counter types. + * + * @param device The identifier of the target device + * @param counterType Flag that indicates which type of errors should be cleared. + * + * @return + * - \ref NVML_SUCCESS if the error counts were cleared + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see + * - nvmlDeviceGetDetailedEccErrors() + * - nvmlDeviceGetTotalEccErrors() + */ +nvmlReturn_t DECLDIR nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType); + +/** + * Set the driver model for the device. + * + * For Fermi &tm; or newer fully supported devices. + * For windows only. + * Requires root/admin permissions. + * + * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached + * to the device it must run in WDDM mode. + * + * It is possible to force the change to WDM (TCC) while the display is still attached with a force flag (nvmlFlagForce). + * This should only be done if the host is subsequently powered down and the display is detached from the device + * before the next reboot. + * + * This operation takes effect after the next reboot. + * + * Windows driver model may only be set to WDDM when running in DEFAULT compute mode. + * + * Change driver model to WDDM is not supported when GPU doesn't support graphics acceleration or + * will not support it after reboot. See \ref nvmlDeviceSetGpuOperationMode. + * + * See \ref nvmlDriverModel_t for details on available driver models. 
+ * See \ref nvmlFlagDefault and \ref nvmlFlagForce + * + * @param device The identifier of the target device + * @param driverModel The target driver model + * @param flags Flags that change the default behavior + * + * @return + * - \ref NVML_SUCCESS if the driver model has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a driverModel is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows or the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetDriverModel() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags); + +typedef enum nvmlClockLimitId_enum { + NVML_CLOCK_LIMIT_ID_RANGE_START = 0xffffff00, + NVML_CLOCK_LIMIT_ID_TDP, + NVML_CLOCK_LIMIT_ID_UNLIMITED +} nvmlClockLimitId_t; + +/** + * Set clocks that device will lock to. + * + * Sets the clocks that the device will be running at to the value in the range of minGpuClockMHz to maxGpuClockMHz. + * Setting this will supersede application clock values and take effect regardless if a cuda app is running. + * See /ref nvmlDeviceSetApplicationsClocks + * + * Can be used as a setting to request constant performance. + * + * This can be called with a pair of integer clock frequencies in MHz, or a pair of /ref nvmlClockLimitId_t values. + * See the table below for valid combinations of these values. + * + * minGpuClock | maxGpuClock | Effect + * ------------+-------------+-------------------------------------------------- + * tdp | tdp | Lock clock to TDP + * unlimited | tdp | Upper bound is TDP but clock may drift below this + * tdp | unlimited | Lower bound is TDP but clock may boost above this + * unlimited | unlimited | Unlocked (== nvmlDeviceResetGpuLockedClocks) + * + * If one arg takes one of these values, the other must be one of these values as + * well. Mixed numeric and symbolic calls return NVML_ERROR_INVALID_ARGUMENT. + * + * Requires root/admin permissions. + * + * After system reboot or driver reload applications clocks go back to their default value. + * See \ref nvmlDeviceResetGpuLockedClocks. + * + * For Volta &tm; or newer fully supported devices. 
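+ *
+ * Illustrative sketches (requires root/admin; the numeric values are examples only):
+ * \code
+ * // Lock the GPU clock to a numeric range in MHz.
+ * nvmlDeviceSetGpuLockedClocks(device, 1200, 1410);
+ *
+ * // Or lock the clock to TDP using the symbolic nvmlClockLimitId_t values from the table above.
+ * nvmlDeviceSetGpuLockedClocks(device, NVML_CLOCK_LIMIT_ID_TDP, NVML_CLOCK_LIMIT_ID_TDP);
+ * \endcode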
+ * + * @param device The identifier of the target device + * @param minGpuClockMHz Requested minimum gpu clock in MHz + * @param maxGpuClockMHz Requested maximum gpu clock in MHz + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minGpuClockMHz and \a maxGpuClockMHz + * is not a valid clock combination + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetGpuLockedClocks(nvmlDevice_t device, unsigned int minGpuClockMHz, unsigned int maxGpuClockMHz); + +/** + * Resets the gpu clock to the default value + * + * This is the gpu clock that will be used after system reboot or driver reload. + * Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * @see nvmlDeviceSetGpuLockedClocks + * + * For Volta &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetGpuLockedClocks(nvmlDevice_t device); + +/** + * Set memory clocks that device will lock to. + * + * Sets the device's memory clocks to the value in the range of minMemClockMHz to maxMemClockMHz. + * Setting this will supersede application clock values and take effect regardless of whether a cuda app is running. + * See /ref nvmlDeviceSetApplicationsClocks + * + * Can be used as a setting to request constant performance. + * + * Requires root/admin permissions. + * + * After system reboot or driver reload applications clocks go back to their default value. + * See \ref nvmlDeviceResetMemoryLockedClocks. + * + * For Ampere &tm; or newer fully supported devices. 
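+ *
+ * A minimal illustrative sketch (requires root/admin; the clock values are examples only):
+ * \code
+ * // Pin the memory clock while benchmarking ...
+ * nvmlDeviceSetMemoryLockedClocks(device, 5001, 5001);
+ * // ... and later restore the default behavior.
+ * nvmlDeviceResetMemoryLockedClocks(device);
+ * \endcode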
+ * + * @param device The identifier of the target device + * @param minMemClockMHz Requested minimum memory clock in MHz + * @param maxMemClockMHz Requested maximum memory clock in MHz + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minGpuClockMHz and \a maxGpuClockMHz + * is not a valid clock combination + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetMemoryLockedClocks(nvmlDevice_t device, unsigned int minMemClockMHz, unsigned int maxMemClockMHz); + +/** + * Resets the memory clock to the default value + * + * This is the memory clock that will be used after system reboot or driver reload. + * Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * @see nvmlDeviceSetMemoryLockedClocks + * + * For Ampere &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetMemoryLockedClocks(nvmlDevice_t device); + +/** + * Set clocks that applications will lock to. + * + * Sets the clocks that compute and graphics applications will be running at. + * e.g. CUDA driver requests these clocks during context creation which means this property + * defines clocks at which CUDA applications will be running unless some overspec event + * occurs (e.g. over power, over thermal or external HW brake). + * + * Can be used as a setting to request constant performance. + * + * On Pascal and newer hardware, this will automatically disable automatic boosting of clocks. + * + * On K80 and newer Kepler and Maxwell GPUs, users desiring fixed performance should also call + * \ref nvmlDeviceSetAutoBoostedClocksEnabled to prevent clocks from automatically boosting + * above the clock value being set. + * + * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetSupportedMemoryClocks and \ref nvmlDeviceGetSupportedGraphicsClocks + * for details on how to list available clocks combinations. + * + * After system reboot or driver reload applications clocks go back to their default value. + * See \ref nvmlDeviceResetApplicationsClocks. 
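+ *
+ * A minimal illustrative sketch (requires root/admin; the clock pair must be one of the
+ * combinations reported by the supported-clocks queries referenced above, so the values
+ * below are examples only):
+ * \code
+ * nvmlReturn_t ret = nvmlDeviceSetApplicationsClocks(device, 877, 1380);
+ * \endcode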
+ *
+ * @param device The identifier of the target device
+ * @param memClockMHz Requested memory clock in MHz
+ * @param graphicsClockMHz Requested graphics clock in MHz
+ *
+ * @return
+ * - \ref NVML_SUCCESS if new settings were successfully set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memClockMHz and \a graphicsClockMHz
+ * is not a valid clock combination
+ * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int memClockMHz, unsigned int graphicsClockMHz);
+
+/**
+ * Resets the application clock to the default value
+ *
+ * This is the applications clock that will be used after system reboot or driver reload.
+ * Default value is constant, but the current value can be changed using \ref nvmlDeviceSetApplicationsClocks.
+ *
+ * On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks,
+ * this call will unlock clocks. This returns clocks to their default behavior of automatically boosting above
+ * base clocks as thermal limits allow.
+ *
+ * @see nvmlDeviceGetApplicationsClock
+ * @see nvmlDeviceSetApplicationsClocks
+ *
+ * For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+ *
+ * @param device The identifier of the target device
+ *
+ * @return
+ * - \ref NVML_SUCCESS if new settings were successfully set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceResetApplicationsClocks(nvmlDevice_t device);
+
+/**
+ * Try to set the current state of Auto Boosted clocks on a device.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
+ * rates are desired.
+ *
+ * Non-root users may use this API by default but can be restricted by root from using this API by calling
+ * \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS.
+ * Note: Persistence Mode is required to modify current Auto Boost settings, therefore, it must be enabled.
+ *
+ * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ * behavior.
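+ *
+ * A minimal illustrative sketch (persistence mode must be enabled, as noted above; error
+ * handling abbreviated):
+ * \code
+ * // Disable Auto Boosted clocks when fixed clock rates are desired.
+ * nvmlDeviceSetAutoBoostedClocksEnabled(device, NVML_FEATURE_DISABLED);
+ * \endcode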
+ * + * @param device The identifier of the target device + * @param enabled What state to try to set Auto Boosted clocks of the target device to + * + * @return + * - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled); + +/** + * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will + * return to when no compute running processes (e.g. CUDA application which have an active context) are running + * + * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. + * Requires root/admin permissions. + * + * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates + * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock + * rates are desired. + * + * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. + * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost + * behavior. + * + * @param device The identifier of the target device + * @param enabled What state to try to set default Auto Boosted clocks of the target device to + * @param flags Flags that change the default behavior. Currently Unused. + * + * @return + * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags); + +/** + * Sets the speed of the fan control policy to default. + * + * For all cuda-capable discrete products with fans + * + * @param device The identifier of the target device + * @param fan The index of the fan, starting at zero + * + * return + * NVML_SUCCESS if speed has been adjusted + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if device is invalid + * NVML_ERROR_NOT_SUPPORTED if the device does not support this + * (doesn't have fans) + * NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan); + +/** + * Sets current fan control policy. + * + * For Maxwell &tm; or newer fully supported devices. + * + * Requires privileged user. 
+ * + * For all cuda-capable discrete products with fans + * + * device The identifier of the target \a device + * policy The fan control \a policy to set + * + * return + * NVML_SUCCESS if \a policy has been set + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a policy is null or the \a fan given doesn't reference + * a fan that exists. + * NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell + * NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, + nvmlFanControlPolicy_t policy); + +/** + * Sets the temperature threshold for the GPU with the specified threshold type in degrees C. + * + * For Maxwell &tm; or newer fully supported devices. + * + * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * + * @param device The identifier of the target device + * @param thresholdType The type of threshold value to be set + * @param temp Reference which hold the value to be set + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int *temp); + +/** + * Set new power limit of this device. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. + * + * \note Limit is not persistent across reboots or driver unloads. + * Enable persistent mode to prevent driver from unloading when no application is using the device. + * + * @param device The identifier of the target device + * @param limit Power management limit in milliwatts to set + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is out of range + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetPowerManagementLimitConstraints + * @see nvmlDeviceGetPowerManagementDefaultLimit + */ +nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit); + +/** + * Sets new GOM. See \a nvmlGpuOperationMode_t for details. + * + * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. + * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. + * Not supported on Quadro ® and Tesla &tm; C-class products. + * Requires root/admin permissions. + * + * Changing GOMs requires a reboot. + * The reboot requirement might be removed in the future. + * + * Compute only GOMs don't support graphics acceleration. 
Under Windows switching to these GOMs when
+ * pending driver model is WDDM is not supported. See \ref nvmlDeviceSetDriverModel.
+ *
+ * @param device                               The identifier of the target device
+ * @param mode                                 Target GOM
+ *
+ * @return
+ *      - \ref NVML_SUCCESS                 if \a mode has been set
+ *      - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *      - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a mode incorrect
+ *      - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support GOM or specific mode
+ *      - \ref NVML_ERROR_NO_PERMISSION     if the user doesn't have permission to perform this operation
+ *      - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *      - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ *
+ * @see nvmlGpuOperationMode_t
+ * @see nvmlDeviceGetGpuOperationMode
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode);
+
+/**
+ * Changes the root/admin restrictions on certain APIs. See \a nvmlRestrictedAPI_t for the list of supported APIs.
+ * This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs.
+ * The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See \a nvmlDeviceGetAPIRestriction
+ * to query the current restriction settings.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * Requires root/admin permissions.
+ *
+ * @param device                               The identifier of the target device
+ * @param apiType                              Target API type for this operation
+ * @param isRestricted                         The target restriction
+ *
+ * @return
+ *      - \ref NVML_SUCCESS                 if \a isRestricted has been set
+ *      - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *      - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a apiType incorrect
+ *      - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support changing API restrictions or the device does not support
+ *                                          the feature that api restrictions are being set for (E.G. Enabling/disabling auto
+ *                                          boosted clocks is not supported by the device)
+ *      - \ref NVML_ERROR_NO_PERMISSION     if the user doesn't have permission to perform this operation
+ *      - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *      - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ *
+ * @see nvmlRestrictedAPI_t
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted);
+
+/**
+ * Sets the speed of a specified fan.
+ *
+ * WARNING: This function changes the fan control policy to manual. It means that YOU have to monitor
+ * the temperature and adjust the fan speed accordingly.
+ * If you set the fan speed too low you can burn your GPU!
+ * Use nvmlDeviceSetDefaultFanSpeed_v2 to restore default control policy.
+ *
+ * For all cuda-capable discrete products with fans that are Maxwell or newer.
+ *
+ * device                                The identifier of the target device
+ * fan                                   The index of the fan, starting at zero
+ * speed                                 The target speed of the fan [0-100] in % of max speed
+ *
+ * return
+ *        NVML_SUCCESS                   if the fan speed has been set
+ *        NVML_ERROR_UNINITIALIZED       if the library has not been successfully initialized
+ *        NVML_ERROR_INVALID_ARGUMENT    if the device is not valid, or the speed is outside acceptable ranges,
+ *                                       or if the fan index doesn't reference an actual fan.
+ *        NVML_ERROR_NOT_SUPPORTED       if the device is older than Maxwell.
+ * NVML_ERROR_UNKNOWN if there was an unexpected error. + */ +nvmlReturn_t DECLDIR nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed); + +/** + * Set the GPCCLK VF offset value + * @param[in] device The identifier of the target device + * @param[in] offset The GPCCLK VF offset value to set + * + * @return + * - \ref NVML_SUCCESS if \a offset has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetGpcClkVfOffset(nvmlDevice_t device, int offset); + +/** + * Set the MemClk (Memory Clock) VF offset value. It requires elevated privileges. + * @param[in] device The identifier of the target device + * @param[in] offset The MemClk VF offset value to set + * + * @return + * - \ref NVML_SUCCESS if \a offset has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetMemClkVfOffset(nvmlDevice_t device, int offset); + +/** + * Set Conf Computing Unprotected Memory Size. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param device Device Handle + * @param sizeKiB Unprotected Memory size to be set in KiB + * + * @return + * - \ref NVML_SUCCESS if \a sizeKiB successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlDeviceSetConfComputeUnprotectedMemSize(nvmlDevice_t device, unsigned long long sizeKiB); + +/** + * Set Conf Computing GPUs ready state. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param isAcceptingWork GPU accepting new work, NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or + * NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE + * + * return + * - \ref NVML_SUCCESS if \a current GPUs ready state is successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a isAcceptingWork is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlSystemSetConfComputeGpusReadyState(unsigned int isAcceptingWork); + +/** + * Set Conf Computing key rotation threshold. + * + * %HOPPER_OR_NEWER% + * Supported on Linux, Windows TCC. + * + * This function is to set the confidential compute key rotation threshold parameters. + * @ref pKeyRotationThrInfo->maxAttackerAdvantage should be in the range from + * NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN to NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX. + * Default value is 60. 
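+ *
+ * A hedged usage sketch: only the \a maxAttackerAdvantage member described above is set
+ * here, and any remaining members of nvmlConfComputeSetKeyRotationThresholdInfo_t (such
+ * as a structure version field) are assumed to be filled in per the structure definition.
+ * @code
+ * nvmlConfComputeSetKeyRotationThresholdInfo_t info = { 0 };
+ * // Assumption: any version member of the structure is set as its definition requires.
+ * info.maxAttackerAdvantage = 60; // documented default shown above
+ * nvmlReturn_t r = nvmlSystemSetConfComputeKeyRotationThresholdInfo(&info);
+ * @endcode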
+ *
+ * @param pKeyRotationThrInfo                  Reference to the key rotation threshold data
+ *
+ * @return
+ *      - \ref NVML_SUCCESS                 if \a key rotation threshold max attacker advantage has been set
+ *      - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *      - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a memory is NULL
+ *      - \ref NVML_ERROR_INVALID_STATE     if confidential compute GPU ready state is enabled
+ *      - \ref NVML_ERROR_NOT_SUPPORTED     if this query is not supported by the device
+ *      - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlSystemSetConfComputeKeyRotationThresholdInfo(
+                                       nvmlConfComputeSetKeyRotationThresholdInfo_t *pKeyRotationThrInfo);
+
+/**
+ * @}
+ */
+
+/** @addtogroup nvmlAccountingStats
+ * @{
+ */
+
+/**
+ * Enables or disables per process accounting.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * Requires root/admin permissions.
+ *
+ * @note This setting is not persistent and will default to disabled after driver unloads.
+ *       Enable persistence mode to be sure the setting doesn't switch off to disabled.
+ *
+ * @note Enabling accounting mode has no negative impact on the GPU performance.
+ *
+ * @note Disabling accounting clears all accounting pids information.
+ *
+ * @note On MIG-enabled GPUs, accounting mode would be set to DISABLED and changing it is not supported.
+ *
+ * See \ref nvmlDeviceGetAccountingMode
+ * See \ref nvmlDeviceGetAccountingStats
+ * See \ref nvmlDeviceClearAccountingPids
+ *
+ * @param device               The identifier of the target device
+ * @param mode                 The target accounting mode
+ *
+ * @return
+ *      - \ref NVML_SUCCESS                 if the new mode has been set
+ *      - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *      - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device or \a mode are invalid
+ *      - \ref NVML_ERROR_NOT_SUPPORTED     if the device doesn't support this feature
+ *      - \ref NVML_ERROR_NO_PERMISSION     if the user doesn't have permission to perform this operation
+ *      - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode);
+
+/**
+ * Clears accounting information about all processes that have already terminated.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * Requires root/admin permissions.
+ *
+ * See \ref nvmlDeviceGetAccountingMode
+ * See \ref nvmlDeviceGetAccountingStats
+ * See \ref nvmlDeviceSetAccountingMode
+ *
+ * @param device               The identifier of the target device
+ *
+ * @return
+ *      - \ref NVML_SUCCESS                 if accounting information has been cleared
+ *      - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *      - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid
+ *      - \ref NVML_ERROR_NOT_SUPPORTED     if the device doesn't support this feature
+ *      - \ref NVML_ERROR_NO_PERMISSION     if the user doesn't have permission to perform this operation
+ *      - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceClearAccountingPids(nvmlDevice_t device);
+
+/** @} */
+
+/***************************************************************************************************/
+/** @defgroup NvLink NvLink Methods
+ * This chapter describes methods that NVML can perform on NVLINK enabled devices.
+ * @{ + */ +/***************************************************************************************************/ + +/** + * Retrieves the state of the device's NvLink for the link specified + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param isActive \a nvmlEnableState_t where NVML_FEATURE_ENABLED indicates that + * the link is active and NVML_FEATURE_DISABLED indicates it + * is inactive + * + * @return + * - \ref NVML_SUCCESS if \a isActive has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a isActive is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t *isActive); + +/** + * Retrieves the version of the device's NvLink for the link specified + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param version Requested NvLink version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a version is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int *version); + +/** + * Retrieves the requested capability from the device's NvLink for the link specified + * Please refer to the \a nvmlNvLinkCapability_t structure for the specific caps that can be queried + * The return value should be treated as a boolean. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param capability Specifies the \a nvmlNvLinkCapability_t to be queried + * @param capResult A boolean for the queried capability indicating that feature is available + * + * @return + * - \ref NVML_SUCCESS if \a capResult has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a capability is invalid or \a capResult is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, + nvmlNvLinkCapability_t capability, unsigned int *capResult); + +/** + * Retrieves the PCI information for the remote node on a NvLink link + * Note: pciSubSystemId is not filled in this function and is indeterminate + * + * For Pascal &tm; or newer fully supported devices. 
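+ *
+ * Illustrative sketch (assuming an initialized library and a valid \a device handle):
+ * check the link state with \ref nvmlDeviceGetNvLinkState first and, if the link is
+ * active, fetch the remote node's PCI information for link 0.
+ * @code
+ * nvmlEnableState_t isActive;
+ * if (nvmlDeviceGetNvLinkState(device, 0, &isActive) == NVML_SUCCESS &&
+ *     isActive == NVML_FEATURE_ENABLED) {
+ *     nvmlPciInfo_t pci;
+ *     nvmlReturn_t r = nvmlDeviceGetNvLinkRemotePciInfo_v2(device, 0, &pci);
+ *     // on NVML_SUCCESS, pci describes the remote node (pciSubSystemId is not filled in)
+ * }
+ * @endcode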
+ * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param pci \a nvmlPciInfo_t of the remote node for the specified link + * + * @return + * - \ref NVML_SUCCESS if \a pci has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a pci is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo_v2(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci); + +/** + * Retrieves the specified error counter value + * Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the NvLink counter to be queried + * @param counterValue Returned counter value + * + * @return + * - \ref NVML_SUCCESS if \a counter has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid or \a counterValue is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, + nvmlNvLinkErrorCounter_t counter, unsigned long long *counterValue); + +/** + * Resets all error counters to zero + * Please refer to \a nvmlNvLinkErrorCounter_t for the list of error counters that are reset + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * + * @return + * - \ref NVML_SUCCESS if the reset is successful + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link); + +/** + * Deprecated: Setting utilization counter control is no longer supported. + * + * Set the NVLINK utilization counter control information for the specified counter, 0 or 1. + * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition. Performs a reset + * of the counters if the reset parameter is non-zero. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param counter Specifies the counter that should be set (0 or 1). 
+ * @param link Specifies the NvLink link to be queried + * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to set + * @param reset Resets the counters on set if non-zero + * + * @return + * - \ref NVML_SUCCESS if the control has been set successfully + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, + nvmlNvLinkUtilizationControl_t *control, unsigned int reset); + +/** + * Deprecated: Getting utilization counter control is no longer supported. + * + * Get the NVLINK utilization counter control information for the specified counter, 0 or 1. + * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param counter Specifies the counter that should be set (0 or 1). + * @param link Specifies the NvLink link to be queried + * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to place information + * + * @return + * - \ref NVML_SUCCESS if the control has been set successfully + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, + nvmlNvLinkUtilizationControl_t *control); + + +/** + * Deprecated: Use \ref nvmlDeviceGetFieldValues with NVML_FI_DEV_NVLINK_THROUGHPUT_* as field values instead. + * + * Retrieve the NVLINK utilization counter based on the current control for a specified counter. + * In general it is good practice to use \a nvmlDeviceSetNvLinkUtilizationControl + * before reading the utilization counters as they have no default state + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the counter that should be read (0 or 1). + * @param rxcounter Receive counter return value + * @param txcounter Transmit counter return value + * + * @return + * - \ref NVML_SUCCESS if \a rxcounter and \a txcounter have been successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, or \a link is invalid or \a rxcounter or \a txcounter are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationCounter(nvmlDevice_t device, unsigned int link, unsigned int counter, + unsigned long long *rxcounter, unsigned long long *txcounter); + +/** + * Deprecated: Freezing NVLINK utilization counters is no longer supported. 
+ * + * Freeze the NVLINK utilization counters + * Both the receive and transmit counters are operated on by this function + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the counter that should be frozen (0 or 1). + * @param freeze NVML_FEATURE_ENABLED = freeze the receive and transmit counters + * NVML_FEATURE_DISABLED = unfreeze the receive and transmit counters + * + * @return + * - \ref NVML_SUCCESS if counters were successfully frozen or unfrozen + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, \a counter, or \a freeze is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceFreezeNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, + unsigned int counter, nvmlEnableState_t freeze); + +/** + * Deprecated: Resetting NVLINK utilization counters is no longer supported. + * + * Reset the NVLINK utilization counters + * Both the receive and transmit counters are operated on by this function + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be reset + * @param counter Specifies the counter that should be reset (0 or 1) + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, unsigned int counter); + +/** +* Get the NVLink device type of the remote device connected over the given link. +* +* @param device The device handle of the target GPU +* @param link The NVLink link index on the target GPU +* @param pNvLinkDeviceType Pointer in which the output remote device type is returned +* +* @return +* - \ref NVML_SUCCESS if \a pNvLinkDeviceType has been set +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_NOT_SUPPORTED if NVLink is not supported +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid, or +* \a pNvLinkDeviceType is NULL +* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is +* otherwise inaccessible +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice_t device, unsigned int link, nvmlIntNvLinkDeviceType_t *pNvLinkDeviceType); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlEvents Event Handling Methods + * This chapter describes methods that NVML can perform against each device to register and wait for + * some event to occur. + * @{ + */ +/***************************************************************************************************/ + +/** + * Create an empty set of events. + * Event set should be freed by \ref nvmlEventSetFree + * + * For Fermi &tm; or newer fully supported devices. 
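+ *
+ * Illustrative end-to-end sketch for the event APIs documented in this chapter (assuming
+ * an initialized library and a valid \a device handle; the 10 second timeout is arbitrary):
+ * @code
+ * nvmlEventSet_t set;
+ * unsigned long long types;
+ * nvmlEventData_t data;
+ * if (nvmlEventSetCreate(&set) == NVML_SUCCESS) {
+ *     if (nvmlDeviceGetSupportedEventTypes(device, &types) == NVML_SUCCESS &&
+ *         nvmlDeviceRegisterEvents(device, types, set) == NVML_SUCCESS) {
+ *         nvmlReturn_t r = nvmlEventSetWait_v2(set, &data, 10000); // wait up to 10 seconds
+ *         // NVML_SUCCESS: data describes the event; NVML_ERROR_TIMEOUT: nothing arrived in time
+ *     }
+ *     nvmlEventSetFree(set);
+ * }
+ * @endcode
+ *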
+ * @param set Reference in which to return the event handle + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a set is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventSetFree + */ +nvmlReturn_t DECLDIR nvmlEventSetCreate(nvmlEventSet_t *set); + +/** + * Starts recording of events on a specified devices and add the events to specified \ref nvmlEventSet_t + * + * For Fermi &tm; or newer fully supported devices. + * Ecc events are available only on ECC enabled devices (see \ref nvmlDeviceGetTotalEccErrors) + * Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode) + * + * For Linux only. + * + * \b IMPORTANT: Operations on \a set are not thread safe + * + * This call starts recording of events on specific device. + * All events that occurred before this call are not recorded. + * Checking if some event occurred can be done with \ref nvmlEventSetWait_v2 + * + * If function reports NVML_ERROR_UNKNOWN, event set is in undefined state and should be freed. + * If function reports NVML_ERROR_NOT_SUPPORTED, event set can still be used. None of the requested eventTypes + * are registered in that case. + * + * @param device The identifier of the target device + * @param eventTypes Bitmask of \ref nvmlEventType to record + * @param set Set to which add new event types + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the platform does not support this feature or some of requested event types + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceGetSupportedEventTypes + * @see nvmlEventSetWait + * @see nvmlEventSetFree + */ +nvmlReturn_t DECLDIR nvmlDeviceRegisterEvents(nvmlDevice_t device, unsigned long long eventTypes, nvmlEventSet_t set); + +/** + * Returns information about events supported on device + * + * For Fermi &tm; or newer fully supported devices. + * + * Events are not supported on Windows. So this function returns an empty mask in \a eventTypes on Windows. + * + * @param device The identifier of the target device + * @param eventTypes Reference in which to return bitmask of supported events + * + * @return + * - \ref NVML_SUCCESS if the eventTypes has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventType is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes(nvmlDevice_t device, unsigned long long *eventTypes); + +/** + * Waits on events and delivers events + * + * For Fermi &tm; or newer fully supported devices. + * + * If some events are ready to be delivered at the time of the call, function returns immediately. + * If there are no events ready to be delivered, function sleeps till event arrives + * but not longer than specified timeout. 
This function in certain conditions can return before + * specified timeout passes (e.g. when interrupt arrives) + * + * On Windows, in case of xid error, the function returns the most recent xid error type seen by the system. + * If there are multiple xid errors generated before nvmlEventSetWait is invoked then the last seen xid error + * type is returned for all xid error events. + * + * On Linux, every xid error event would return the associated event data and other information if applicable. + * + * In MIG mode, if device handle is provided, the API reports all the events for the available instances, + * only if the caller has appropriate privileges. In absence of required privileges, only the events which + * affect all the instances (i.e. whole device) are reported. + * + * This API does not currently support per-instance event reporting using MIG device handles. + * + * @param set Reference to set of events to wait on + * @param data Reference in which to return event data + * @param timeoutms Maximum amount of wait time in milliseconds for registered event + * + * @return + * - \ref NVML_SUCCESS if the data has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a data is NULL + * - \ref NVML_ERROR_TIMEOUT if no event arrived in specified timeout or interrupt arrived + * - \ref NVML_ERROR_GPU_IS_LOST if a GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlEventSetWait_v2(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); + +/** + * Releases events in the set + * + * For Fermi &tm; or newer fully supported devices. + * + * @param set Reference to events to be released + * + * @return + * - \ref NVML_SUCCESS if the event has been successfully released + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlEventSetFree(nvmlEventSet_t set); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlZPI Drain states + * This chapter describes methods that NVML can perform against each device to control their drain state + * and recognition by NVML and NVIDIA kernel driver. These methods can be used with out-of-band tools to + * power on/off GPUs, enable robust reset scenarios, etc. + * @{ + */ +/***************************************************************************************************/ + +/** + * Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. + * Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before + * this call is made. + * Must be called as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. 
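+ *
+ * Illustrative sketch: drain a GPU and confirm the new state. \a pci is assumed to
+ * already hold the PCI address of the target GPU, and persistence mode is assumed to
+ * be disabled as required above.
+ * @code
+ * nvmlPciInfo_t pci;       // assumed to be populated with the target GPU's PCI address
+ * nvmlEnableState_t state;
+ * if (nvmlDeviceModifyDrainState(&pci, NVML_FEATURE_ENABLED) == NVML_SUCCESS) {
+ *     nvmlDeviceQueryDrainState(&pci, &state); // state should now report NVML_FEATURE_ENABLED
+ * }
+ * @endcode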
+ * + * @param pciInfo The PCI address of the GPU drain state to be modified + * @param newState The drain state that should be entered, see \ref nvmlEnableState_t + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a newState is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation + * - \ref NVML_ERROR_IN_USE if the device has persistence mode turned on + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceModifyDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t newState); + +/** + * Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining + * state. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI address of the GPU drain state to be queried + * @param currentState The current drain state for this GPU, see \ref nvmlEnableState_t + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a currentState is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceQueryDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t *currentState); + +/** + * This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver + * as long as no other processes are attached. If other processes are attached, this call will return + * NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. Note: the + * only situation where a process can still be attached after nvmlDeviceModifyDrainState() is called + * to initiate the draining state is if that process was using, and is still using, a GPU before the + * call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled + * prior to this call. + * + * For long-running NVML processes please note that this will change the enumeration of current GPUs. + * For example, if there are four GPUs present and GPU1 is removed, the new enumeration will be 0-2. + * Also, device handles after the removed GPU will not be valid and must be re-established. + * Must be run as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. 
+ * + * @param pciInfo The PCI address of the GPU to be removed + * @param gpuState Whether the GPU is to be removed, from the OS + * see \ref nvmlDetachGpuState_t + * @param linkState Requested upstream PCIe link state, see \ref nvmlPcieLinkState_t + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_IN_USE if the device is still in use and cannot be removed + */ +nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu_v2(nvmlPciInfo_t *pciInfo, nvmlDetachGpuState_t gpuState, nvmlPcieLinkState_t linkState); + +/** + * Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that + * were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. + * If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes + * the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order. + * + * In addition, all newly discovered GPUs will be initialized and their ECC scrubbed which may take several seconds + * per GPU. Also, all device handles are no longer guaranteed to be valid post discovery. + * + * Must be run as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI tree to be searched. Only the domain, bus, and device + * fields are used in this call. + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the operating system does not support this feature + * - \ref NVML_ERROR_OPERATING_SYSTEM if the operating system is denying this feature + * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceDiscoverGpus (nvmlPciInfo_t *pciInfo); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlFieldValueQueries Field Value Queries + * This chapter describes NVML operations that are associated with retrieving Field Values from NVML + * @{ + */ +/***************************************************************************************************/ + +/** + * Request values for a list of fields for a device. This API allows multiple fields to be queried at once. + * If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs + * will be populated from a single call rather than making a driver call for each fieldId. + * + * @param device The device handle of the GPU to request field values for + * @param valuesCount Number of entries in values that should be retrieved + * @param values Array of \a valuesCount structures to hold field values. + * Each value's fieldId must be populated prior to this call + * + * @return + * - \ref NVML_SUCCESS if any values in \a values were populated. 
Note that you must + * check the nvmlReturn field of each value for each individual + * status + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL + */ +nvmlReturn_t DECLDIR nvmlDeviceGetFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); + +/** + * Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once. + * + * @param device The device handle of the GPU to request field values for + * @param valuesCount Number of entries in values that should be cleared + * @param values Array of \a valuesCount structures to hold field values. + * Each value's fieldId must be populated prior to this call + * + * @return + * - \ref NVML_SUCCESS if any values in \a values were cleared. Note that you must + * check the nvmlReturn field of each value for each individual + * status + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL + */ +nvmlReturn_t DECLDIR nvmlDeviceClearFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup vGPU Enums, Constants and Structs + * @{ + */ +/** @} */ +/***************************************************************************************************/ + +/***************************************************************************************************/ +/** @defgroup nvmlVirtualGpuQueries vGPU APIs + * This chapter describes operations that are associated with NVIDIA vGPU Software products. + * @{ + */ +/***************************************************************************************************/ + +/** + * This method is used to get the virtualization mode corresponding to the GPU. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device Identifier of the target device + * @param pVirtualMode Reference to virtualization mode. One of NVML_GPU_VIRTUALIZATION_? + * + * @return + * - \ref NVML_SUCCESS if \a pVirtualMode is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t *pVirtualMode); + +/** + * Queries if SR-IOV host operation is supported on a vGPU supported device. + * + * Checks whether SR-IOV host capability is supported by the device and the + * driver, and indicates device is in SR-IOV mode if both of these conditions + * are true. + * + * @param device The identifier of the target device + * @param pHostVgpuMode Reference in which to return the current vGPU mode + * + * @return + * - \ref NVML_SUCCESS if device's vGPU mode has been successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle is 0 or \a pVgpuMode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature. + * - \ref NVML_ERROR_UNKNOWN if any unexpected error occurred + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHostVgpuMode(nvmlDevice_t device, nvmlHostVgpuMode_t *pHostVgpuMode); + +/** + * This method is used to set the virtualization mode corresponding to the GPU. + * + * For Kepler &tm; or newer fully supported devices. 
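+ *
+ * Illustrative sketch (assuming a valid \a device handle): read the current mode with
+ * \ref nvmlDeviceGetVirtualizationMode, then set the desired one; the concrete
+ * NVML_GPU_VIRTUALIZATION_* value to use is left as an assumption.
+ * @code
+ * nvmlGpuVirtualizationMode_t current;
+ * if (nvmlDeviceGetVirtualizationMode(device, &current) == NVML_SUCCESS) {
+ *     nvmlGpuVirtualizationMode_t target = current; // substitute the desired virtualization mode
+ *     nvmlReturn_t r = nvmlDeviceSetVirtualizationMode(device, target);
+ * }
+ * @endcode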
+ * + * @param device Identifier of the target device + * @param virtualMode virtualization mode. One of NVML_GPU_VIRTUALIZATION_? + * + * @return + * - \ref NVML_SUCCESS if \a virtualMode is set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a virtualMode is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported. + * - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client. + */ +nvmlReturn_t DECLDIR nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode); + +/** + * Get the vGPU heterogeneous mode for the device. + * + * When in heterogeneous mode, a vGPU can concurrently host timesliced vGPUs with differing framebuffer sizes. + * + * On successful return, the function returns \a pHeterogeneousMode->mode with the current vGPU heterogeneous mode. + * \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should + * set the correct version number to retrieve the vGPU heterogeneous mode. + * \a pHeterogeneousMode->mode can either be \ref NVML_FEATURE_ENABLED or \ref NVML_FEATURE_DISABLED. + * + * @param device The identifier of the target device + * @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid or \a pHeterogeneousMode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support this feature + * - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuHeterogeneousMode(nvmlDevice_t device, nvmlVgpuHeterogeneousMode_t *pHeterogeneousMode); + +/** + * Enable or disable vGPU heterogeneous mode for the device. + * + * When in heterogeneous mode, a vGPU can concurrently host timesliced vGPUs with differing framebuffer sizes. + * + * API would return an appropriate error code upon unsuccessful activation. For example, the heterogeneous mode + * set will fail with error \ref NVML_ERROR_IN_USE if any vGPU instance is active on the device. The caller of this API + * is expected to shutdown the vGPU VMs and retry setting the \a mode. + * On successful return, the function updates the vGPU heterogeneous mode with the user provided \a pHeterogeneousMode->mode. + * \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should + * set the correct version number to set the vGPU heterogeneous mode. 
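+ *
+ * Illustrative sketch: the exact version value for nvmlVgpuHeterogeneousMode_t is not
+ * shown here and is assumed to be set as the structure definition requires.
+ * @code
+ * nvmlVgpuHeterogeneousMode_t hmode = { 0 };
+ * // Assumption: hmode.version is set to the structure's version value, as described above.
+ * hmode.mode = NVML_FEATURE_ENABLED;
+ * nvmlReturn_t r = nvmlDeviceSetVgpuHeterogeneousMode(device, &hmode);
+ * // NVML_ERROR_IN_USE indicates active vGPU instances; shut them down and retry.
+ * @endcode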
+ * + * @param device Identifier of the target device + * @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a pHeterogeneousMode is NULL or \a pHeterogeneousMode->mode is invalid + * - \ref NVML_ERROR_IN_USE If the \a device is in use + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device doesn't support this feature + * - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetVgpuHeterogeneousMode(nvmlDevice_t device, const nvmlVgpuHeterogeneousMode_t *pHeterogeneousMode); + +/** + * Query the placement ID of active vGPU instance. + * + * When in vGPU heterogeneous mode, this function returns a valid placement ID as \a pPlacement->placementId + * else NVML_INVALID_VGPU_PLACEMENT_ID is returned. + * \a pPlacement->version is the version number of the structure nvmlVgpuPlacementId_t, the caller should + * set the correct version number to get placement id of the vGPU instance \a vgpuInstance. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param pPlacement Pointer to vGPU placement ID structure \a nvmlVgpuPlacementId_t + * + * @return + * - \ref NVML_SUCCESS If information is successfully retrieved + * - \ref NVML_ERROR_NOT_FOUND If \a vgpuInstance does not match a valid active vGPU instance + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuInstance is invalid or \a pPlacement is NULL + * - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacement is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetPlacementId(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuPlacementId_t *pPlacement); + +/** + * Query the supported vGPU placement ID of the vGPU type. + * + * An array of supported vGPU placement IDs for the vGPU type ID indicated by \a vgpuTypeId is returned in the + * caller-supplied buffer of \a pPlacementList->placementIds. Memory needed for the placementIds array should be + * allocated based on maximum instances of a vGPU type which can be queried via \ref nvmlVgpuTypeGetMaxInstances(). + * + * This function will return supported placement IDs even if GPU is not in vGPU heterogeneous mode. + * + * @param device Identifier of the target device + * @param vgpuTypeId Handle to vGPU type. 
The vGPU type ID + * @param pPlacementList Pointer to the vGPU placement structure \a nvmlVgpuPlacementList_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a vgpuTypeId is invalid or \a pPlacementList is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device or \a vgpuTypeId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacementList is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t *pPlacementList); + +/** + * Query the creatable vGPU placement ID of the vGPU type. + * + * An array of creatable vGPU placement IDs for the vGPU type ID indicated by \a vgpuTypeId is returned in the + * caller-supplied buffer of \a pPlacementList->placementIds. Memory needed for the placementIds array should be + * allocated based on maximum instances of a vGPU type which can be queried via \ref nvmlVgpuTypeGetMaxInstances(). + * The creatable vGPU placement IDs may differ over time, as there may be restrictions on what type of vGPU the + * vGPU instance is running. + * + * The function will return \ref NVML_ERROR_NOT_SUPPORTED if the \a device is not in vGPU heterogeneous mode. + * + * @param device The identifier of the target device + * @param vgpuTypeId Handle to vGPU type. The vGPU type ID + * @param pPlacementList Pointer to the list of vGPU placement structure \a nvmlVgpuPlacementList_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a vgpuTypeId is invalid or \a pPlacementList is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device or \a vgpuTypeId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_VERSION_MISMATCH If the version of \a pPlacementList is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t *pPlacementList); + +/** + * Retrieve the static GSP heap size of the vGPU type in bytes + * + * @param vgpuTypeId Handle to vGPU type + * @param gspHeapSize Reference to return the GSP heap size value + * @return + * - \ref NVML_SUCCESS Successful completion + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuTypeId is invalid, or \a gspHeapSize is NULL + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetGspHeapSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *gspHeapSize); + +/** + * Retrieve the static framebuffer reservation of the vGPU type in bytes + * + * @param vgpuTypeId Handle to vGPU type + * @param fbReservation Reference to return the framebuffer reservation + * @return + * - \ref NVML_SUCCESS Successful completion + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuTypeId is invalid, or \a fbReservation is NULL + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ 
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetFbReservation(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *fbReservation); + +/** + * Set the desirable vGPU capability of a device + * + * Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be set. + * See \ref nvmlEnableState_t for available state. + * + * @param device The identifier of the target device + * @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be set + * @param state The target capability mode + * + * @return + * - \ref NVML_SUCCESS Successful completion + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, or \a capability is invalid, or \a state is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state, or \a device not in vGPU mode + * - \ref NVML_ERROR_UNKNOWN On any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceSetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, nvmlEnableState_t state); + +/** + * Retrieve the vGPU Software licensable features. + * + * Identifies whether the system supports vGPU Software Licensing. If it does, return the list of licensable feature(s) + * and their current license status. + * + * @param device Identifier of the target device + * @param pGridLicensableFeatures Pointer to structure in which vGPU software licensable features are returned + * + * @return + * - \ref NVML_SUCCESS if licensable features are successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlVgpu vGPU Management + * @{ + * + * This chapter describes APIs supporting NVIDIA vGPU. + */ +/***************************************************************************************************/ + +/** + * Retrieve the requested vGPU driver capability. + * + * Refer to the \a nvmlVgpuDriverCapability_t structure for the specific capabilities that can be queried. + * The return value in \a capResult should be treated as a boolean, with a non-zero value indicating that the capability + * is supported. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param capability Specifies the \a nvmlVgpuDriverCapability_t to be queried + * @param capResult A boolean for the queried capability indicating that feature is supported + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a capability is invalid, or \a capResult is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED the API is not supported in current state or \a devices not in vGPU mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlGetVgpuDriverCapabilities(nvmlVgpuDriverCapability_t capability, unsigned int *capResult); + +/** + * Retrieve the requested vGPU capability for GPU. + * + * Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be queried. 
+ * The return value in \a capResult reports a non-zero value indicating that the capability + * is supported, and also reports the capability's data based on the queried capability. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be queried + * @param capResult Specifies that the queried capability is supported, and also returns capability's data + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a capability is invalid, or \a capResult is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED the API is not supported in current state or \a device not in vGPU mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, unsigned int *capResult); + +/** + * Retrieve the supported vGPU types on a physical GPU (device). + * + * An array of supported vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer + * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount + * is used to return the number of vGPU types written to the buffer. + * + * If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. + * To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0. + * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported. + * + * @param device The identifier of the target device + * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types + * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); + +/** + * Retrieve the currently creatable vGPU types on a physical GPU (device). + * + * An array of creatable vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer + * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount + * is used to return the number of vGPU types written to the buffer. + * + * The creatable vGPU types for a device may differ over time, as there may be restrictions on what type of vGPU types + * can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable + * list will be restricted to whatever vGPU type is already running on the device. 
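+ *
+ * Illustrative sketch using the two-call size query pattern described below (assuming a
+ * valid \a device handle and <stdlib.h>; passing NULL for the buffer on the initial size
+ * query is an assumption here):
+ * @code
+ * unsigned int count = 0;
+ * nvmlReturn_t r = nvmlDeviceGetCreatableVgpus(device, &count, NULL);
+ * if (r == NVML_ERROR_INSUFFICIENT_SIZE && count > 0) {
+ *     nvmlVgpuTypeId_t *ids = (nvmlVgpuTypeId_t *)malloc(count * sizeof(*ids));
+ *     r = nvmlDeviceGetCreatableVgpus(device, &count, ids);
+ *     // on NVML_SUCCESS, ids[0..count-1] hold the creatable vGPU type IDs
+ *     free(ids);
+ * }
+ * @endcode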
+ * + * If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. + * To query the number of vGPU types that can be created for the GPU, call this function with *vgpuCount = 0. + * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable. + * + * @param device The identifier of the target device + * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types + * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); + +/** + * Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). + * See \ref nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param vgpuTypeClass Pointer to string array to return class in + * @param size Size of string + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeClass is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeClass, unsigned int *size); + +/** + * Retrieve the vGPU type name. + * + * The name is an alphanumeric string that denotes a particular vGPU, e.g. GRID M60-2Q. It will not + * exceed 64 characters in length (including the NUL terminator). See \ref + * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param vgpuTypeName Pointer to buffer to return name + * @param size Size of buffer + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a name is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeName, unsigned int *size); + +/** + * Retrieve the GPU Instance Profile ID for the given vGPU type ID. + * The API will return a valid GPU Instance Profile ID for the MIG capable vGPU types, else INVALID_GPU_INSTANCE_PROFILE_ID is + * returned. + * + * For Kepler &tm; or newer fully supported devices. 
+ * + * @param vgpuTypeId Handle to vGPU type + * @param gpuInstanceProfileId GPU Instance Profile ID + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device is not in vGPU Host virtualization mode + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a gpuInstanceProfileId is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *gpuInstanceProfileId); + +/** + * Retrieve the device ID of a vGPU type. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param deviceID Device ID and vendor ID of the device contained in single 32 bit value + * @param subsystemID Subsystem ID and subsystem vendor ID of the device contained in single 32 bit value + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a deviceId or \a subsystemID are NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *deviceID, unsigned long long *subsystemID); + +/** + * Retrieve the vGPU framebuffer size in bytes. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param fbSize Pointer to framebuffer size in bytes + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a fbSize is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *fbSize); + +/** + * Retrieve count of vGPU's supported display heads. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param numDisplayHeads Pointer to number of display heads + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a numDisplayHeads is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *numDisplayHeads); + +/** + * Retrieve vGPU display head's maximum supported resolution. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param displayIndex Zero-based index of display head + * @param xdim Pointer to maximum number of pixels in X dimension + * @param ydim Pointer to maximum number of pixels in Y dimension + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a xdim or \a ydim are NULL, or \a displayIndex + * is out of range. 
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int *xdim, unsigned int *ydim);
+
+/**
+ * Retrieve license requirements for a vGPU type
+ *
+ * The license type and version required to run the specified vGPU type is returned as an alphanumeric string, in the form
+ * "<license name>,<version>", for example "GRID-Virtual-PC,2.0". If a vGPU is runnable with more than one type of license,
+ * the licenses are delimited by a semicolon, for example "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0".
+ *
+ * The total length of the returned string will not exceed 128 characters, including the NUL terminator.
+ * See \ref nvmlVgpuConstants::NVML_GRID_LICENSE_BUFFER_SIZE.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuTypeId             Handle to vGPU type
+ * @param vgpuTypeLicenseString  Pointer to buffer to return license info
+ * @param size                   Size of \a vgpuTypeLicenseString buffer
+ *
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeLicenseString is NULL
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeLicenseString, unsigned int size);
+
+/**
+ * Retrieve the static frame rate limit value of the vGPU type
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuTypeId      Handle to vGPU type
+ * @param frameRateLimit  Reference to return the frame rate limit value
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a frameRateLimit is NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *frameRateLimit);
+
+/**
+ * Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param device             The identifier of the target device
+ * @param vgpuTypeId         Handle to vGPU type
+ * @param vgpuInstanceCount  Pointer to get the max number of vGPU instances
+ *                           that can be created on a device for given vgpuTypeId
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid or is not supported on target device,
+ *                                    or \a vgpuInstanceCount is NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCount);
+
+/**
+ * Retrieve the maximum number of vGPU instances supported per VM for given vGPU type
+ *
+ * For Kepler &tm; or newer fully supported devices.
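+ *
+ * A minimal usage sketch (illustrative only, not upstream NVML documentation):
+ *
+ * \code
+ * unsigned int perVm = 0;
+ * if (nvmlVgpuTypeGetMaxInstancesPerVm(vgpuTypeId, &perVm) == NVML_SUCCESS) {
+ *     // perVm is the maximum number of vGPU instances of this type supported in a single VM
+ * }
+ * \endcode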
+ * + * @param vgpuTypeId Handle to vGPU type + * @param vgpuInstanceCountPerVm Pointer to get the max number of vGPU instances supported per VM for given \a vgpuTypeId + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuInstanceCountPerVm is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCountPerVm); + +/** + * Retrieve the active vGPU instances on a device. + * + * An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. The + * array element count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances + * written to the buffer. + * + * If the supplied buffer is not large enough to accommodate the vGPU instance array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount. + * To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return + * NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU Types are supported. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param vgpuCount Pointer which passes in the array size as well as get + * back the number of types + * @param vgpuInstances Pointer to array in which to return list of vGPU instances + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuCount is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuInstance_t *vgpuInstances); + +/** + * Retrieve the VM ID associated with a vGPU instance. + * + * The VM ID is returned as a string, not exceeding 80 characters in length (including the NUL terminator). + * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. + * + * The format of the VM ID varies by platform, and is indicated by the type identifier returned in \a vmIdType. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param vmId Pointer to caller-supplied buffer to hold VM ID + * @param size Size of buffer in bytes + * @param vmIdType Pointer to hold VM ID type + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vmId or \a vmIdType is NULL, or \a vgpuInstance is 0 + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char *vmId, unsigned int size, nvmlVgpuVmIdType_t *vmIdType); + +/** + * Retrieve the UUID of a vGPU instance. 
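+ *
+ * A minimal usage sketch (illustrative only, not upstream NVML documentation), assuming
+ * \a vgpuInstance was obtained from nvmlDeviceGetActiveVgpus():
+ *
+ * \code
+ * char uuid[NVML_DEVICE_UUID_BUFFER_SIZE];
+ * if (nvmlVgpuInstanceGetUUID(vgpuInstance, uuid, sizeof(uuid)) == NVML_SUCCESS) {
+ *     // uuid now holds the NUL-terminated vGPU UUID string
+ * }
+ * \endcode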
+ *
+ * The UUID is a globally unique identifier associated with the vGPU, and is returned as a 5-part hexadecimal string,
+ * not exceeding 80 characters in length (including the NULL terminator).
+ * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance  Identifier of the target vGPU instance
+ * @param uuid          Pointer to caller-supplied buffer to hold vGPU UUID
+ * @param size          Size of buffer in bytes
+ *
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a uuid is NULL
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char *uuid, unsigned int size);
+
+/**
+ * Retrieve the NVIDIA driver version installed in the VM associated with a vGPU.
+ *
+ * The version is returned as an alphanumeric string in the caller-supplied buffer \a version. The version
+ * string will not exceed 80 characters in length (including the NUL terminator).
+ * See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE.
+ *
+ * nvmlVgpuInstanceGetVmDriverVersion() may be called at any time for a vGPU instance. The guest VM driver version is
+ * returned as "Not Available" if no NVIDIA driver is installed in the VM, or the VM has not yet booted to the point where the
+ * NVIDIA driver is loaded and initialized.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance  Identifier of the target vGPU instance
+ * @param version       Caller-supplied buffer to return driver version string
+ * @param length        Size of \a version buffer
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a version has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length);
+
+/**
+ * Retrieve the framebuffer usage in bytes.
+ *
+ * Framebuffer usage is the amount of vGPU framebuffer memory that is currently in use by the VM.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance  The identifier of the target instance
+ * @param fbUsage       Pointer to framebuffer usage in bytes
+ *
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbUsage is NULL
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long *fbUsage);
+
+/**
+ * @deprecated Use \ref nvmlVgpuInstanceGetLicenseInfo_v2.
+ *
+ * Retrieve the current licensing state of the vGPU instance.
+ * + * If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param licensed Reference to return the licensing status + * + * @return + * - \ref NVML_SUCCESS if \a licensed has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licensed is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int *licensed); + +/** + * Retrieve the vGPU type of a vGPU instance. + * + * Returns the vGPU type ID of vgpu assigned to the vGPU instance. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param vgpuTypeId Reference to return the vgpuTypeId + * + * @return + * - \ref NVML_SUCCESS if \a vgpuTypeId has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuTypeId is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t *vgpuTypeId); + +/** + * Retrieve the frame rate limit set for the vGPU instance. + * + * Returns the value of the frame rate limit set for the vGPU instance + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param frameRateLimit Reference to return the frame rate limit + * + * @return + * - \ref NVML_SUCCESS if \a frameRateLimit has been set + * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a frameRateLimit is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int *frameRateLimit); + +/** + * Retrieve the current ECC mode of vGPU instance. 
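+ *
+ * A minimal usage sketch (illustrative only, not upstream NVML documentation):
+ *
+ * \code
+ * nvmlEnableState_t ecc;
+ * if (nvmlVgpuInstanceGetEccMode(vgpuInstance, &ecc) == NVML_SUCCESS) {
+ *     // ecc is NVML_FEATURE_ENABLED or NVML_FEATURE_DISABLED for this vGPU instance
+ * }
+ * \endcode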
+ * + * @param vgpuInstance The identifier of the target vGPU instance + * @param eccMode Reference in which to return the current ECC mode + * + * @return + * - \ref NVML_SUCCESS if the vgpuInstance's ECC mode has been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mode is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEccMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t *eccMode); + +/** + * Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param encoderCapacity Reference to an unsigned int for the encoder capacity + * + * @return + * - \ref NVML_SUCCESS if \a encoderCapacity has been retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderQueryType is invalid + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int *encoderCapacity); + +/** + * Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param encoderCapacity Unsigned int for the encoder capacity value + * + * @return + * - \ref NVML_SUCCESS if \a encoderCapacity has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderCapacity is out of range of 0-100. + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity); + +/** + * Retrieves the current encoder statistics of a vGPU Instance + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param sessionCount Reference to an unsigned int for count of active encoder sessions + * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions + * @param averageLatency Reference to an unsigned int for encode latency in microseconds + * + * @return + * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount , or \a averageFps or \a averageLatency is NULL + * or \a vgpuInstance is 0. 
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, + unsigned int *averageFps, unsigned int *averageLatency); + +/** + * Retrieves information about all active encoder sessions on a vGPU Instance. + * + * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The + * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions + * written to the buffer. + * + * If the supplied buffer is not large enough to accommodate the active session array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. + * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return + * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param sessionCount Reference to caller supplied array size, and returns + * the number of sessions. + * @param sessionInfo Reference to caller supplied array in which the list + * of session information us returned. + * + * @return + * - \ref NVML_SUCCESS if \a sessionInfo is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is + returned in \a sessionCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL, or \a vgpuInstance is 0. + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfo); + +/** +* Retrieves the active frame buffer capture sessions statistics of a vGPU Instance +* +* For Maxwell &tm; or newer fully supported devices. +* +* @param vgpuInstance Identifier of the target vGPU instance +* @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats +* +* @return +* - \ref NVML_SUCCESS if \a fbcStats is fetched +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbcStats is NULL +* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance, nvmlFBCStats_t *fbcStats); + +/** +* Retrieves information about active frame buffer capture sessions on a vGPU Instance. +* +* An array of active FBC sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The +* array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions +* written to the buffer. +* +* If the supplied buffer is not large enough to accommodate the active session array, the function returns +* NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount. 
+* To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return +* NVML_SUCCESS with number of active FBC sessions updated in *sessionCount. +* +* For Maxwell &tm; or newer fully supported devices. +* +* @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may +* be zero if there are no new frames captured since the session started. +* +* @param vgpuInstance Identifier of the target vGPU instance +* @param sessionCount Reference to caller supplied array size, and returns the number of sessions. +* @param sessionInfo Reference in which to return the session information +* +* @return +* - \ref NVML_SUCCESS if \a sessionInfo is fetched +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a sessionCount is NULL. +* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system +* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlFBCSessionInfo_t *sessionInfo); + +/** +* Retrieve the GPU Instance ID for the given vGPU Instance. +* The API will return a valid GPU Instance ID for MIG backed vGPU Instance, else INVALID_GPU_INSTANCE_ID is returned. +* +* For Kepler &tm; or newer fully supported devices. +* +* @param vgpuInstance Identifier of the target vGPU instance +* @param gpuInstanceId GPU Instance ID +* +* @return +* - \ref NVML_SUCCESS successful completion +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a gpuInstanceId is NULL. +* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetGpuInstanceId(nvmlVgpuInstance_t vgpuInstance, unsigned int *gpuInstanceId); + +/** +* Retrieves the PCI Id of the given vGPU Instance i.e. the PCI Id of the GPU as seen inside the VM. +* +* The vGPU PCI id is returned as "00000000:00:00.0" if NVIDIA driver is not installed on the vGPU instance. +* +* @param vgpuInstance Identifier of the target vGPU instance +* @param vgpuPciId Caller-supplied buffer to return vGPU PCI Id string +* @param length Size of the vgpuPciId buffer +* +* @return +* - \ref NVML_SUCCESS if vGPU PCI Id is sucessfully retrieved +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuPciId is NULL +* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system +* - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance +* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small, \a length is set to required length +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance_t vgpuInstance, char *vgpuPciId, unsigned int *length); + +/** +* Retrieve the requested capability for a given vGPU type. Refer to the \a nvmlVgpuCapability_t structure +* for the specific capabilities that can be queried. 
The return value in \a capResult should be treated as +* a boolean, with a non-zero value indicating that the capability is supported. +* +* For Maxwell &tm; or newer fully supported devices. +* +* @param vgpuTypeId Handle to vGPU type +* @param capability Specifies the \a nvmlVgpuCapability_t to be queried +* @param capResult A boolean for the queried capability indicating that feature is supported +* +* @return +* - \ref NVML_SUCCESS successful completion +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a capability is invalid, or \a capResult is NULL +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuCapability_t capability, unsigned int *capResult); + +/** + * Retrieve the MDEV UUID of a vGPU instance. + * + * The MDEV UUID is a globally unique identifier of the mdev device assigned to the VM, and is returned as a 5-part hexadecimal string, + * not exceeding 80 characters in length (including the NULL terminator). + * MDEV UUID is displayed only on KVM platform. + * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param mdevUuid Pointer to caller-supplied buffer to hold MDEV UUID + * @param size Size of buffer in bytes + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_SUPPORTED on any hypervisor other than KVM + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mdevUuid is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance_t vgpuInstance, char *mdevUuid, unsigned int size); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvml vGPU Migration + * This chapter describes operations that are associated with vGPU Migration. + * @{ + */ +/***************************************************************************************************/ + +/** + * Structure representing range of vGPU versions. + */ +typedef struct nvmlVgpuVersion_st +{ + unsigned int minVersion; //!< Minimum vGPU version. + unsigned int maxVersion; //!< Maximum vGPU version. +} nvmlVgpuVersion_t; + +/** + * vGPU metadata structure. 
+ */
+typedef struct nvmlVgpuMetadata_st
+{
+    unsigned int             version;                                                    //!< Current version of the structure
+    unsigned int             revision;                                                   //!< Current revision of the structure
+    nvmlVgpuGuestInfoState_t guestInfoState;                                             //!< Current state of Guest-dependent fields
+    char                     guestDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in guest
+    char                     hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE];  //!< Version of driver installed in host
+    unsigned int             reserved[6];                                                //!< Reserved for internal use
+    unsigned int             vgpuVirtualizationCaps;                                     //!< vGPU virtualization capabilities bitfield
+    unsigned int             guestVgpuVersion;                                           //!< vGPU version of guest driver
+    unsigned int             opaqueDataSize;                                             //!< Size of opaque data field in bytes
+    char                     opaqueData[4];                                              //!< Opaque data
+} nvmlVgpuMetadata_t;
+
+/**
+ * Physical GPU metadata structure
+ */
+typedef struct nvmlVgpuPgpuMetadata_st
+{
+    unsigned int      version;                                                    //!< Current version of the structure
+    unsigned int      revision;                                                   //!< Current revision of the structure
+    char              hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE];  //!< Host driver version
+    unsigned int      pgpuVirtualizationCaps;                                     //!< Pgpu virtualization capabilities bitfield
+    unsigned int      reserved[5];                                                //!< Reserved for internal use
+    nvmlVgpuVersion_t hostSupportedVgpuRange;                                     //!< vGPU version range supported by host driver
+    unsigned int      opaqueDataSize;                                             //!< Size of opaque data field in bytes
+    char              opaqueData[4];                                              //!< Opaque data
+} nvmlVgpuPgpuMetadata_t;
+
+/**
+ * vGPU VM compatibility codes
+ */
+typedef enum nvmlVgpuVmCompatibility_enum
+{
+    NVML_VGPU_VM_COMPATIBILITY_NONE      = 0x0,  //!< vGPU is not runnable
+    NVML_VGPU_VM_COMPATIBILITY_COLD      = 0x1,  //!< vGPU is runnable from a cold / powered-off state (ACPI S5)
+    NVML_VGPU_VM_COMPATIBILITY_HIBERNATE = 0x2,  //!< vGPU is runnable from a hibernated state (ACPI S4)
+    NVML_VGPU_VM_COMPATIBILITY_SLEEP     = 0x4,  //!< vGPU is runnable from a sleep state (ACPI S3)
+    NVML_VGPU_VM_COMPATIBILITY_LIVE      = 0x8   //!< vGPU is runnable from a live/paused state (ACPI S0)
+} nvmlVgpuVmCompatibility_t;
+
+/**
+ * vGPU-pGPU compatibility limit codes
+ */
+typedef enum nvmlVgpuPgpuCompatibilityLimitCode_enum
+{
+    NVML_VGPU_COMPATIBILITY_LIMIT_NONE         = 0x0,         //!< Compatibility is not limited.
+    NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER  = 0x1,         //!< Compatibility is limited by host driver version.
+    NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = 0x2,         //!< Compatibility is limited by guest driver version.
+    NVML_VGPU_COMPATIBILITY_LIMIT_GPU          = 0x4,         //!< Compatibility is limited by GPU hardware.
+    NVML_VGPU_COMPATIBILITY_LIMIT_OTHER        = 0x80000000   //!< Compatibility is limited by an undefined factor.
+} nvmlVgpuPgpuCompatibilityLimitCode_t;
+
+/**
+ * vGPU-pGPU compatibility structure
+ */
+typedef struct nvmlVgpuPgpuCompatibility_st
+{
+    nvmlVgpuVmCompatibility_t            vgpuVmCompatibility;    //!< Compatibility of vGPU VM. See \ref nvmlVgpuVmCompatibility_t
+    nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode; //!< Limiting factor for vGPU-pGPU compatibility. See \ref nvmlVgpuPgpuCompatibilityLimitCode_t
+} nvmlVgpuPgpuCompatibility_t;
+
+/**
+ * Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its associated VM
+ * such as the currently installed NVIDIA guest driver version, together with host driver version and an opaque data section
+ * containing internal state.
+ *
+ * nvmlVgpuInstanceGetMetadata() may be called at any time for a vGPU instance.
Some fields in the returned structure are + * dependent on information obtained from the guest VM, which may not yet have reached a state where that information + * is available. The current state of these dependent fields is reflected in the info structure's \ref nvmlVgpuGuestInfoState_t field. + * + * The VMM may choose to read and save the vGPU's VM info as persistent metadata associated with the VM, and provide + * it to Virtual GPU Manager when creating a vGPU for subsequent instances of the VM. + * + * The caller passes in a buffer via \a vgpuMetadata, with the size of the buffer in \a bufferSize. If the vGPU Metadata structure + * is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed + * in \a bufferSize. + * + * @param vgpuInstance vGPU instance handle + * @param vgpuMetadata Pointer to caller-supplied buffer into which vGPU metadata is written + * @param bufferSize Size of vgpuMetadata buffer + * + * @return + * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned + * - \ref NVML_ERROR_INSUFFICIENT_SIZE vgpuMetadata buffer is too small, required size is returned in \a bufferSize + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a vgpuInstance is 0; if \a vgpuMetadata is NULL and the value of \a bufferSize is not 0. + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuMetadata_t *vgpuMetadata, unsigned int *bufferSize); + +/** + * Returns a vGPU metadata structure for the physical GPU indicated by \a device. The structure contains information about + * the GPU and the currently installed NVIDIA host driver version that's controlling it, together with an opaque data section + * containing internal state. + * + * The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the \a pgpuMetadata + * structure is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed + * in \a bufferSize. + * + * @param device The identifier of the target device + * @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written + * @param bufferSize Pointer to size of \a pgpuMetadata buffer + * + * @return + * - \ref NVML_SUCCESS GPU metadata structure was successfully returned + * - \ref NVML_ERROR_INSUFFICIENT_SIZE pgpuMetadata buffer is too small, required size is returned in \a bufferSize + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0. + * - \ref NVML_ERROR_NOT_SUPPORTED vGPU is not supported by the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpuMetadata_t *pgpuMetadata, unsigned int *bufferSize); + +/** + * Takes a vGPU instance metadata structure read from \ref nvmlVgpuInstanceGetMetadata(), and a vGPU metadata structure for a + * physical GPU read from \ref nvmlDeviceGetVgpuMetadata(), and returns compatibility information of the vGPU instance and the + * physical GPU. + * + * The caller passes in a buffer via \a compatibilityInfo, into which a compatibility information structure is written. 
+ * The structure defines the states in which the vGPU / VM may be booted on the physical GPU. If the vGPU / VM compatibility
+ * with the physical GPU is limited, a limit code indicates the factor limiting compatibility.
+ * (see \ref nvmlVgpuPgpuCompatibilityLimitCode_t for details).
+ *
+ * Note: vGPU compatibility does not take into account dynamic capacity conditions that may limit a system's ability to
+ * boot a given vGPU or associated VM.
+ *
+ * @param vgpuMetadata       Pointer to caller-supplied vGPU metadata structure
+ * @param pgpuMetadata       Pointer to caller-supplied GPU metadata structure
+ * @param compatibilityInfo  Pointer to caller-supplied buffer to hold compatibility info
+ *
+ * @return
+ * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuMetadata or \a pgpuMetadata or \a compatibilityInfo are NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t *vgpuMetadata, nvmlVgpuPgpuMetadata_t *pgpuMetadata, nvmlVgpuPgpuCompatibility_t *compatibilityInfo);
+
+/**
+ * Returns the properties of the physical GPU indicated by the device in an ascii-encoded string format.
+ *
+ * The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the
+ * string is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed
+ * in \a bufferSize.
+ *
+ * @param device        The identifier of the target device
+ * @param pgpuMetadata  Pointer to caller-supplied buffer into which \a pgpuMetadata is written
+ * @param bufferSize    Pointer to size of \a pgpuMetadata buffer
+ *
+ * @return
+ * - \ref NVML_SUCCESS GPU metadata structure was successfully returned
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a pgpuMetadata buffer is too small, required size is returned in \a bufferSize
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0.
+ * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPgpuMetadataString(nvmlDevice_t device, char *pgpuMetadata, unsigned int *bufferSize);
+
+/**
+ * Returns the vGPU Software scheduler logs.
+ * \a pSchedulerLog points to a caller-allocated structure to contain the logs. The number of elements returned will
+ * never exceed \a NVML_SCHEDULER_SW_MAX_LOG_ENTRIES.
+ *
+ * To get the entire logs, call the function at least 5 times a second.
+ *
+ * For Pascal &tm; or newer fully supported devices.
+ *
+ * @param device         The identifier of the target \a device
+ * @param pSchedulerLog  Reference in which \a pSchedulerLog is written
+ *
+ * @return
+ * - \ref NVML_SUCCESS vGPU scheduler logs were successfully obtained
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerLog is NULL or \a device is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerLog(nvmlDevice_t device, nvmlVgpuSchedulerLog_t *pSchedulerLog);
+
+/**
+ * Returns the vGPU scheduler state.
+ * The information returned in \a nvmlVgpuSchedulerGetState_t is not relevant if the BEST EFFORT policy is set.
+ *
+ * For Pascal &tm; or newer fully supported devices.
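+ *
+ * A minimal usage sketch (illustrative only, not upstream NVML documentation); the fields of
+ * \ref nvmlVgpuSchedulerGetState_t are defined with the structure itself and are not repeated here:
+ *
+ * \code
+ * nvmlVgpuSchedulerGetState_t state;
+ * nvmlReturn_t ret = nvmlDeviceGetVgpuSchedulerState(device, &state);
+ * // on NVML_SUCCESS, state describes the currently active vGPU scheduler configuration
+ * \endcode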
+ * + * @param device The identifier of the target \a device + * @param pSchedulerState Reference in which \a pSchedulerState is returned + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler state is successfully obtained + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerGetState_t *pSchedulerState); + +/** + * Returns the vGPU scheduler capabilities. + * The list of supported vGPU schedulers returned in \a nvmlVgpuSchedulerCapabilities_t is from + * the NVML_VGPU_SCHEDULER_POLICY_*. This list enumerates the supported scheduler policies + * if the engine is Graphics type. + * The other values in \a nvmlVgpuSchedulerCapabilities_t are also applicable if the engine is + * Graphics type. For other engine types, it is BEST EFFORT policy. + * If ARR is supported and enabled, scheduling frequency and averaging factor are applicable + * else timeSlice is applicable. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target \a device + * @param pCapabilities Reference in which \a pCapabilities is written + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler capabilities were successfully obtained + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pCapabilities is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice_t device, nvmlVgpuSchedulerCapabilities_t *pCapabilities); + +/** + * Sets the vGPU scheduler state. + * + * For Pascal &tm; or newer fully supported devices. + * + * The scheduler state change won't persist across module load/unload. + * Scheduler state and params will be allowed to set only when no VM is running. + * In \a nvmlVgpuSchedulerSetState_t, IFF enableARRMode is enabled then + * provide avgFactorForARR and frequency as input. If enableARRMode is disabled + * then provide timeslice as input. + * + * @param device The identifier of the target \a device + * @param pSchedulerState vGPU \a pSchedulerState to set + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler state has been successfully set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid + * - \ref NVML_ERROR_RESET_REQUIRED if setting \a pSchedulerState failed with fatal error, + * reboot is required to overcome from this error. + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode + * or if any vGPU instance currently exists on the \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerSetState_t *pSchedulerState); + +/* + * Virtual GPU (vGPU) version + * + * The NVIDIA vGPU Manager and the guest drivers are tagged with a range of supported vGPU versions. This determines the range of NVIDIA guest driver versions that + * are compatible for vGPU feature support with a given NVIDIA vGPU Manager. For vGPU feature support, the range of supported versions for the NVIDIA vGPU Manager + * and the guest driver must overlap. 
Otherwise, the guest driver fails to load in the VM. + * + * When the NVIDIA guest driver loads, either when the VM is booted or when the driver is installed or upgraded, a negotiation occurs between the guest driver + * and the NVIDIA vGPU Manager to select the highest mutually compatible vGPU version. The negotiated vGPU version stays the same across VM migration. + */ + +/** + * Query the ranges of supported vGPU versions. + * + * This function gets the linear range of supported vGPU versions that is preset for the NVIDIA vGPU Manager and the range set by an administrator. + * If the preset range has not been overridden by \ref nvmlSetVgpuVersion, both ranges are the same. + * + * The caller passes pointers to the following \ref nvmlVgpuVersion_t structures, into which the NVIDIA vGPU Manager writes the ranges: + * 1. \a supported structure that represents the preset range of vGPU versions supported by the NVIDIA vGPU Manager. + * 2. \a current structure that represents the range of supported vGPU versions set by an administrator. By default, this range is the same as the preset range. + * + * @param supported Pointer to the structure in which the preset range of vGPU versions supported by the NVIDIA vGPU Manager is written + * @param current Pointer to the structure in which the range of supported vGPU versions set by an administrator is written + * + * @return + * - \ref NVML_SUCCESS The vGPU version range structures were successfully obtained. + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported. + * - \ref NVML_ERROR_INVALID_ARGUMENT The \a supported parameter or the \a current parameter is NULL. + * - \ref NVML_ERROR_UNKNOWN An error occurred while the data was being fetched. + */ +nvmlReturn_t DECLDIR nvmlGetVgpuVersion(nvmlVgpuVersion_t *supported, nvmlVgpuVersion_t *current); + +/** + * Override the preset range of vGPU versions supported by the NVIDIA vGPU Manager with a range set by an administrator. + * + * This function configures the NVIDIA vGPU Manager with a range of supported vGPU versions set by an administrator. This range must be a subset of the + * preset range that the NVIDIA vGPU Manager supports. The custom range set by an administrator takes precedence over the preset range and is advertised to + * the guest VM for negotiating the vGPU version. See \ref nvmlGetVgpuVersion for details of how to query the preset range of versions supported. + * + * This function takes a pointer to vGPU version range structure \ref nvmlVgpuVersion_t as input to override the preset vGPU version range that the NVIDIA vGPU Manager supports. + * + * After host system reboot or driver reload, the range of supported versions reverts to the range that is preset for the NVIDIA vGPU Manager. + * + * @note 1. The range set by the administrator must be a subset of the preset range that the NVIDIA vGPU Manager supports. Otherwise, an error is returned. + * 2. If the range of supported guest driver versions does not overlap the range set by the administrator, the guest driver fails to load. + * 3. If the range of supported guest driver versions overlaps the range set by the administrator, the guest driver will load with a negotiated + * vGPU version that is the maximum value in the overlapping range. + * 4. No VMs must be running on the host when this function is called. If a VM is running on the host, the call to this function fails. + * + * @param vgpuVersion Pointer to a caller-supplied range of supported vGPU versions. 
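+ *
+ * A minimal usage sketch (illustrative only, not upstream NVML documentation), pinning the
+ * supported range to the preset maximum version; it assumes no VM is running on the host:
+ *
+ * \code
+ * nvmlVgpuVersion_t supported, current, override;
+ * if (nvmlGetVgpuVersion(&supported, &current) == NVML_SUCCESS) {
+ *     override.minVersion = supported.maxVersion;
+ *     override.maxVersion = supported.maxVersion;
+ *     nvmlReturn_t ret = nvmlSetVgpuVersion(&override);   // NVML_ERROR_IN_USE if a VM is running
+ * }
+ * \endcode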
+ * + * @return + * - \ref NVML_SUCCESS The preset range of supported vGPU versions was successfully overridden. + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported. + * - \ref NVML_ERROR_IN_USE The range was not overridden because a VM is running on the host. + * - \ref NVML_ERROR_INVALID_ARGUMENT The \a vgpuVersion parameter specifies a range that is outside the range supported by the NVIDIA vGPU Manager or if \a vgpuVersion is NULL. + */ +nvmlReturn_t DECLDIR nvmlSetVgpuVersion(nvmlVgpuVersion_t *vgpuVersion); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlUtil vGPU Utilization and Accounting + * This chapter describes operations that are associated with vGPU Utilization and Accounting. + * @{ + */ +/***************************************************************************************************/ + +/** + * Retrieves current utilization for vGPUs on a physical GPU (device). + * + * For Kepler &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for vGPU instances running + * on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer + * pointed at by \a utilizationSamples. One utilization sample structure is returned per vGPU instance, and includes the + * CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values + * in nvmlValue_t unions. The function sets the caller-supplied \a sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to + * indicate the returned value type. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance + * count in \a vgpuInstanceSamplesCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate + * a buffer of size vgpuInstanceSamplesCount * sizeof(nvmlVgpuInstanceUtilizationSample_t). Invoke the function again with + * the allocated buffer passed in \a utilizationSamples, and \a vgpuInstanceSamplesCount set to the number of entries the + * buffer is sized for. + * + * On successful return, the function updates \a vgpuInstanceSampleCount with the number of vGPU utilization sample + * structures that were actually written. This may differ from a previously read value as vGPU instances are created or + * destroyed. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @param device The identifier for the target device + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
+ * @param sampleValType Pointer to caller-supplied buffer to hold the type of returned sample values + * @param vgpuInstanceSamplesCount Pointer to caller-supplied array size, and returns number of vGPU instances + * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU utilization samples are returned + + * @return + * - \ref NVML_SUCCESS if utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuInstanceSamplesCount or \a sampleValType is + * NULL, or a sample count of 0 is passed with a non-NULL \a utilizationSamples + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuInstanceSamplesCount is too small to return samples for all + * vGPU instances currently executing on the device + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, + nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, + nvmlVgpuInstanceUtilizationSample_t *utilizationSamples); + +/** + * Retrieves recent utilization for vGPU instances running on a physical GPU (device). + * + * For Kepler &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, jpeg decoder, and OFA for vGPU + * instances running on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied + * buffer pointed at by \a vgpuUtilInfo->vgpuUtilArray. One utilization sample structure is returned per vGPU instance, and includes the + * CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values + * in nvmlValue_t unions. The function sets the caller-supplied \a vgpuUtilInfo->sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to + * indicate the returned value type. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a vgpuUtilInfo->vgpuUtilArray set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance + * count in \a vgpuUtilInfo->vgpuInstanceCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate + * a buffer of size vgpuUtilInfo->vgpuInstanceCount * sizeof(nvmlVgpuInstanceUtilizationInfo_t). Invoke the function again with + * the allocated buffer passed in \a vgpuUtilInfo->vgpuUtilArray, and \a vgpuUtilInfo->vgpuInstanceCount set to the number of entries the + * buffer is sized for. + * + * On successful return, the function updates \a vgpuUtilInfo->vgpuInstanceCount with the number of vGPU utilization sample + * structures that were actually written. This may differ from a previously read value as vGPU instances are created or + * destroyed. + * + * \a vgpuUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. 
Set \a vgpuUtilInfo->lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @param device The identifier for the target device + * @param vgpuUtilInfo Pointer to the caller-provided structure of nvmlVgpuInstancesUtilizationInfo_t + + * @return + * - \ref NVML_SUCCESS if utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuUtilInfo is NULL, or \a vgpuUtilInfo->vgpuInstanceCount is 0 + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a vgpuUtilInfo is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a vgpuUtilInfo->vgpuUtilArray is NULL, or the buffer size of vgpuUtilInfo->vgpuInstanceCount is too small. + * The caller should check the current vGPU instance count from the returned vgpuUtilInfo->vgpuInstanceCount, and call + * the function again with a buffer of size vgpuUtilInfo->vgpuInstanceCount * sizeof(nvmlVgpuInstanceUtilizationInfo_t) + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuInstancesUtilizationInfo(nvmlDevice_t device, + nvmlVgpuInstancesUtilizationInfo_t *vgpuUtilInfo); + +/** + * Retrieves current utilization for processes running on vGPUs on a physical GPU (device). + * + * For Maxwell &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running on + * vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the + * caller-supplied buffer pointed at by \a utilizationSamples. One utilization sample structure is returned per process running + * on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which + * the samples were recorded. Individual utilization values are returned as "unsigned int" values. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance + * count in \a vgpuProcessSamplesCount. The caller should allocate a buffer of size + * vgpuProcessSamplesCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with + * the allocated buffer passed in \a utilizationSamples, and \a vgpuProcessSamplesCount set to the number of entries the + * buffer is sized for. + * + * On successful return, the function updates \a vgpuSubProcessSampleCount with the number of vGPU sub process utilization sample + * structures that were actually written. This may differ from a previously read value depending on the number of processes that are active + * in any given sample period. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. 
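+ *
+ * A minimal sizing-and-query sketch (illustrative only, not upstream NVML documentation), reading
+ * all buffered samples (lastSeenTimeStamp = 0) and omitting malloc()/error handling:
+ *
+ * \code
+ * unsigned int count = 0;
+ * nvmlReturn_t ret = nvmlDeviceGetVgpuProcessUtilization(device, 0, &count, NULL);
+ * if (ret == NVML_ERROR_INSUFFICIENT_SIZE && count > 0) {
+ *     nvmlVgpuProcessUtilizationSample_t *samples = malloc(count * sizeof(*samples));
+ *     ret = nvmlDeviceGetVgpuProcessUtilization(device, 0, &count, samples);
+ *     // on NVML_SUCCESS, samples[0..count-1] holds one entry per process with non-zero utilization
+ *     free(samples);
+ * }
+ * \endcode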
+ * + * @param device The identifier for the target device + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. + * @param vgpuProcessSamplesCount Pointer to caller-supplied array size, and returns number of processes running on vGPU instances + * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned + + * @return + * - \ref NVML_SUCCESS if utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuProcessSamplesCount or a sample count of 0 is + * passed with a non-NULL \a utilizationSamples + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuProcessSamplesCount is too small to return samples for all + * vGPU instances currently executing on the device + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, + unsigned int *vgpuProcessSamplesCount, + nvmlVgpuProcessUtilizationSample_t *utilizationSamples); + +/** + * Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device). + * + * For Maxwell &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, jpeg decoder, and OFA for processes running + * on vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied + * buffer pointed at by \a vgpuProcUtilInfo->vgpuProcUtilArray. One utilization sample structure is returned per process running + * on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which + * the samples were recorded. Individual utilization values are returned as "unsigned int" values. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a vgpuProcUtilInfo->vgpuProcUtilArray set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current processes' count + * running on vGPU instances in \a vgpuProcUtilInfo->vgpuProcessCount. The caller should allocate a buffer of size + * vgpuProcUtilInfo->vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed + * in \a vgpuProcUtilInfo->vgpuProcUtilArray, and \a vgpuProcUtilInfo->vgpuProcessCount set to the number of entries the buffer is sized for. + * + * On successful return, the function updates \a vgpuProcUtilInfo->vgpuProcessCount with the number of vGPU sub process utilization sample + * structures that were actually written. This may differ from a previously read value depending on the number of processes that are active + * in any given sample period. + * + * vgpuProcUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. 
Set vgpuProcUtilInfo->lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @param device The identifier for the target device + * @param vgpuProcUtilInfo Pointer to the caller-provided structure of nvmlVgpuProcessesUtilizationInfo_t + + * @return + * - \ref NVML_SUCCESS if utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuProcUtilInfo is null + * - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a vgpuProcUtilInfo is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a vgpuProcUtilInfo->vgpuProcUtilArray is null, or supplied \a vgpuProcUtilInfo->vgpuProcessCount + * is too small to return samples for all processes on vGPU instances currently executing on the device. + * The caller should check the current processes count from the returned \a vgpuProcUtilInfo->vgpuProcessCount, + * and call the function again with a buffer of size + * vgpuProcUtilInfo->vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t) + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessesUtilizationInfo(nvmlDevice_t device, nvmlVgpuProcessesUtilizationInfo_t *vgpuProcUtilInfo); + +/** + * Queries the state of per process accounting mode on vGPU. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance The identifier of the target vGPU instance + * @param mode Reference in which to return the current accounting mode + * + * @return + * - \ref NVML_SUCCESS if the mode has been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mode is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t *mode); + +/** + * Queries list of processes running on vGPU that can be queried for accounting stats. The list of processes + * returned can be in running or terminated state. + * + * For Maxwell &tm; or newer fully supported devices. + * + * To just query the maximum number of processes that can be queried, call this function with *count = 0 and + * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. + * + * For more details see \ref nvmlVgpuInstanceGetAccountingStats. + * + * @note In case of PID collision some processes might not be accessible before the circular buffer is full. 
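+ *
+ * A minimal sketch of the query pattern described above (assuming \a vgpuInstance refers
+ * to a valid active vGPU instance; error handling omitted):
+ * \code
+ * unsigned int count = 0;
+ * // First call with pids=NULL and *count=0 to learn how many entries can be queried.
+ * nvmlReturn_t ret = nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, NULL);
+ * if (ret == NVML_ERROR_INSUFFICIENT_SIZE)
+ * {
+ *     unsigned int *pids = (unsigned int *) malloc(count * sizeof(unsigned int));
+ *     // Second call returns the process ids; each id can then be passed to
+ *     // nvmlVgpuInstanceGetAccountingStats.
+ *     ret = nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, pids);
+ * }
+ * \endcode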
+ * + * @param vgpuInstance The identifier of the target vGPU instance + * @param count Reference in which to provide the \a pids array size, and + * to return the number of elements ready to be queried + * @param pids Reference in which to return list of process ids + * + * @return + * - \ref NVML_SUCCESS if pids were successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a count is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to expected value) + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlVgpuInstanceGetAccountingPids + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingPids(nvmlVgpuInstance_t vgpuInstance, unsigned int *count, unsigned int *pids); + +/** + * Queries process's accounting stats. + * + * For Maxwell &tm; or newer fully supported devices. + * + * Accounting stats capture GPU utilization and other statistics across the lifetime of a process, and + * can be queried during life time of the process or after its termination. + * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and + * updated to actual running time after its termination. + * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old + * processes. + * + * See \ref nvmlAccountingStats_t for description of each returned metric. + * List of processes that can be queried can be retrieved from \ref nvmlVgpuInstanceGetAccountingPids. + * + * @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode. + * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be + * queried since they don't contribute to GPU utilization. + * @note In case of pid collision stats of only the latest process (that terminated last) will be reported + * + * @param vgpuInstance The identifier of the target vGPU instance + * @param pid Process Id of the target process to query stats for + * @param stats Reference in which to return the process's accounting stats + * + * @return + * - \ref NVML_SUCCESS if stats have been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a stats is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * or \a stats is not found + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingStats(nvmlVgpuInstance_t vgpuInstance, unsigned int pid, nvmlAccountingStats_t *stats); + +/** + * Clears accounting information of the vGPU instance that have already terminated. + * + * For Maxwell &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode. 
+ * @note Only compute and graphics applications stats are reported and can be cleared since monitoring applications + * stats don't contribute to GPU utilization. + * + * @param vgpuInstance The identifier of the target vGPU instance + * + * @return + * - \ref NVML_SUCCESS if accounting information has been cleared + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceClearAccountingPids(nvmlVgpuInstance_t vgpuInstance); + +/** + * Query the license information of the vGPU instance. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param licenseInfo Pointer to vGPU license information structure + * + * @return + * - \ref NVML_SUCCESS if information is successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licenseInfo is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseInfo_v2(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuLicenseInfo_t *licenseInfo); +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlExcludedGpuQueries Excluded GPU Queries + * This chapter describes NVML operations that are associated with excluded GPUs. + * @{ + */ +/***************************************************************************************************/ + +/** + * Excluded GPU device information + **/ +typedef struct nvmlExcludedDeviceInfo_st +{ + nvmlPciInfo_t pciInfo; //!< The PCI information for the excluded GPU + char uuid[NVML_DEVICE_UUID_BUFFER_SIZE]; //!< The ASCII string UUID for the excluded GPU +} nvmlExcludedDeviceInfo_t; + + /** + * Retrieves the number of excluded GPU devices in the system. + * + * For all products. + * + * @param deviceCount Reference in which to return the number of excluded devices + * + * @return + * - \ref NVML_SUCCESS if \a deviceCount has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL + */ +nvmlReturn_t DECLDIR nvmlGetExcludedDeviceCount(unsigned int *deviceCount); + +/** + * Acquire the device information for an excluded GPU device, based on its index. + * + * For all products. + * + * Valid indices are derived from the \a deviceCount returned by + * \ref nvmlGetExcludedDeviceCount(). For example, if \a deviceCount is 2 the valid indices + * are 0 and 1, corresponding to GPU 0 and GPU 1. 
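+ *
+ * A minimal sketch of enumerating excluded devices with this pair of calls (error
+ * handling omitted):
+ * \code
+ * unsigned int excludedCount = 0;
+ * nvmlGetExcludedDeviceCount(&excludedCount);
+ * for (unsigned int i = 0; i < excludedCount; i++)
+ * {
+ *     nvmlExcludedDeviceInfo_t info;
+ *     // info.uuid and info.pciInfo identify the excluded GPU at index i.
+ *     nvmlGetExcludedDeviceInfoByIndex(i, &info);
+ * }
+ * \endcode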
+ * + * @param index The index of the target GPU, >= 0 and < \a deviceCount + * @param info Reference in which to return the device information + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a info is NULL + * + * @see nvmlGetExcludedDeviceCount + */ +nvmlReturn_t DECLDIR nvmlGetExcludedDeviceInfoByIndex(unsigned int index, nvmlExcludedDeviceInfo_t *info); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlMultiInstanceGPU Multi Instance GPU Management + * This chapter describes NVML operations that are associated with Multi Instance GPU management. + * @{ + */ +/***************************************************************************************************/ + +/** + * Disable Multi Instance GPU mode. + */ +#define NVML_DEVICE_MIG_DISABLE 0x0 + +/** + * Enable Multi Instance GPU mode. + */ +#define NVML_DEVICE_MIG_ENABLE 0x1 + +/** + * GPU instance profiles. + * + * These macros should be passed to \ref nvmlDeviceGetGpuInstanceProfileInfo to retrieve the + * detailed information about a GPU instance such as profile ID, engine counts. + */ +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE 0x0 +#define NVML_GPU_INSTANCE_PROFILE_2_SLICE 0x1 +#define NVML_GPU_INSTANCE_PROFILE_3_SLICE 0x2 +#define NVML_GPU_INSTANCE_PROFILE_4_SLICE 0x3 +#define NVML_GPU_INSTANCE_PROFILE_7_SLICE 0x4 +#define NVML_GPU_INSTANCE_PROFILE_8_SLICE 0x5 +#define NVML_GPU_INSTANCE_PROFILE_6_SLICE 0x6 +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV1 0x7 +#define NVML_GPU_INSTANCE_PROFILE_2_SLICE_REV1 0x8 +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV2 0x9 +#define NVML_GPU_INSTANCE_PROFILE_COUNT 0xA + +/** + * MIG GPU instance profile capability. + * + * Bit field values representing MIG profile capabilities + * \ref nvmlGpuInstanceProfileInfo_v3_t.capabilities + */ +#define NVML_GPU_INTSTANCE_PROFILE_CAPS_P2P 0x1 + +/** + * MIG compute instance profile capability. + * + * Bit field values representing MIG profile capabilities + * \ref nvmlComputeInstanceProfileInfo_v3_t.capabilities + */ +/* No capabilities for compute profiles currently exposed */ + +typedef struct nvmlGpuInstancePlacement_st +{ + unsigned int start; //!< Index of first occupied memory slice + unsigned int size; //!< Number of memory slices occupied +} nvmlGpuInstancePlacement_t; + +/** + * GPU instance profile information. + */ +typedef struct nvmlGpuInstanceProfileInfo_st +{ + unsigned int id; //!< Unique profile ID within the device + unsigned int isP2pSupported; //!< Peer-to-Peer support + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< GPU instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int copyEngineCount; //!< Copy Engine count + unsigned int decoderCount; //!< Decoder Engine count + unsigned int encoderCount; //!< Encoder Engine count + unsigned int jpegCount; //!< JPEG Engine count + unsigned int ofaCount; //!< OFA Engine count + unsigned long long memorySizeMB; //!< Memory size in MBytes +} nvmlGpuInstanceProfileInfo_t; + +/** + * GPU instance profile information (v2). + * + * Version 2 adds the \ref nvmlGpuInstanceProfileInfo_v2_t.version field + * to the start of the structure, and the \ref nvmlGpuInstanceProfileInfo_v2_t.name + * field to the end. This structure is not backwards-compatible with + * \ref nvmlGpuInstanceProfileInfo_t. 
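+ *
+ * Before passing this structure to a versioned query such as
+ * \ref nvmlDeviceGetGpuInstanceProfileInfoV, the \a version field must be initialized,
+ * for example:
+ * \code
+ * nvmlGpuInstanceProfileInfo_v2_t profileInfo = { .version = nvmlGpuInstanceProfileInfo_v2 };
+ * \endcode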
+ */ +typedef struct nvmlGpuInstanceProfileInfo_v2_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v2) + unsigned int id; //!< Unique profile ID within the device + unsigned int isP2pSupported; //!< Peer-to-Peer support + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< GPU instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int copyEngineCount; //!< Copy Engine count + unsigned int decoderCount; //!< Decoder Engine count + unsigned int encoderCount; //!< Encoder Engine count + unsigned int jpegCount; //!< JPEG Engine count + unsigned int ofaCount; //!< OFA Engine count + unsigned long long memorySizeMB; //!< Memory size in MBytes + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name +} nvmlGpuInstanceProfileInfo_v2_t; + +/** + * Version identifier value for \ref nvmlGpuInstanceProfileInfo_v2_t.version. + */ +#define nvmlGpuInstanceProfileInfo_v2 NVML_STRUCT_VERSION(GpuInstanceProfileInfo, 2) + +/** + * GPU instance profile information (v3). + * + * Version 3 removes isP2pSupported field and adds the \ref nvmlGpuInstanceProfileInfo_v3_t.capabilities + * field \ref nvmlGpuInstanceProfileInfo_t. + */ +typedef struct nvmlGpuInstanceProfileInfo_v3_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v3) + unsigned int id; //!< Unique profile ID within the device + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< GPU instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int copyEngineCount; //!< Copy Engine count + unsigned int decoderCount; //!< Decoder Engine count + unsigned int encoderCount; //!< Encoder Engine count + unsigned int jpegCount; //!< JPEG Engine count + unsigned int ofaCount; //!< OFA Engine count + unsigned long long memorySizeMB; //!< Memory size in MBytes + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name + unsigned int capabilities; //!< Additional capabilities +} nvmlGpuInstanceProfileInfo_v3_t; + +/** + * Version identifier value for \ref nvmlGpuInstanceProfileInfo_v3_t.version. + */ +#define nvmlGpuInstanceProfileInfo_v3 NVML_STRUCT_VERSION(GpuInstanceProfileInfo, 3) + +typedef struct nvmlGpuInstanceInfo_st +{ + nvmlDevice_t device; //!< Parent device + unsigned int id; //!< Unique instance ID within the device + unsigned int profileId; //!< Unique profile ID within the device + nvmlGpuInstancePlacement_t placement; //!< Placement for this instance +} nvmlGpuInstanceInfo_t; + +typedef struct +{ + struct nvmlGpuInstance_st* handle; +} nvmlGpuInstance_t; + +/** + * Compute instance profiles. 
+ * + * These macros should be passed to \ref nvmlGpuInstanceGetComputeInstanceProfileInfo to retrieve the + * detailed information about a compute instance such as profile ID, engine counts + */ +#define NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE 0x0 +#define NVML_COMPUTE_INSTANCE_PROFILE_2_SLICE 0x1 +#define NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE 0x2 +#define NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE 0x3 +#define NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE 0x4 +#define NVML_COMPUTE_INSTANCE_PROFILE_8_SLICE 0x5 +#define NVML_COMPUTE_INSTANCE_PROFILE_6_SLICE 0x6 +#define NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 0x7 +#define NVML_COMPUTE_INSTANCE_PROFILE_COUNT 0x8 + +#define NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED 0x0 //!< All the engines except multiprocessors would be shared +#define NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT 0x1 + +typedef struct nvmlComputeInstancePlacement_st +{ + unsigned int start; //!< Index of first occupied compute slice + unsigned int size; //!< Number of compute slices occupied +} nvmlComputeInstancePlacement_t; + +/** + * Compute instance profile information. + */ +typedef struct nvmlComputeInstanceProfileInfo_st +{ + unsigned int id; //!< Unique profile ID within the GPU instance + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< Compute instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count +} nvmlComputeInstanceProfileInfo_t; + +/** + * Compute instance profile information (v2). + * + * Version 2 adds the \ref nvmlComputeInstanceProfileInfo_v2_t.version field + * to the start of the structure, and the \ref nvmlComputeInstanceProfileInfo_v2_t.name + * field to the end. This structure is not backwards-compatible with + * \ref nvmlComputeInstanceProfileInfo_t. + */ +typedef struct nvmlComputeInstanceProfileInfo_v2_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlComputeInstanceProfileInfo_v2) + unsigned int id; //!< Unique profile ID within the GPU instance + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< Compute instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name +} nvmlComputeInstanceProfileInfo_v2_t; + +/** + * Version identifier value for \ref nvmlComputeInstanceProfileInfo_v2_t.version. + */ +#define nvmlComputeInstanceProfileInfo_v2 NVML_STRUCT_VERSION(ComputeInstanceProfileInfo, 2) + +/** + * Compute instance profile information (v3). + * + * Version 3 adds the \ref nvmlComputeInstanceProfileInfo_v3_t.capabilities field + * \ref nvmlComputeInstanceProfileInfo_t. 
+ */ +typedef struct nvmlComputeInstanceProfileInfo_v3_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlComputeInstanceProfileInfo_v3) + unsigned int id; //!< Unique profile ID within the GPU instance + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< Compute instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name + unsigned int capabilities; //!< Additional capabilities +} nvmlComputeInstanceProfileInfo_v3_t; + +/** + * Version identifier value for \ref nvmlComputeInstanceProfileInfo_v3_t.version. + */ +#define nvmlComputeInstanceProfileInfo_v3 NVML_STRUCT_VERSION(ComputeInstanceProfileInfo, 3) + +typedef struct nvmlComputeInstanceInfo_st +{ + nvmlDevice_t device; //!< Parent device + nvmlGpuInstance_t gpuInstance; //!< Parent GPU instance + unsigned int id; //!< Unique instance ID within the GPU instance + unsigned int profileId; //!< Unique profile ID within the GPU instance + nvmlComputeInstancePlacement_t placement; //!< Placement for this instance within the GPU instance's compute slice range {0, sliceCount} +} nvmlComputeInstanceInfo_t; + +typedef struct +{ + struct nvmlComputeInstance_st* handle; +} nvmlComputeInstance_t; + +/** + * Set MIG mode for the device. + * + * For Ampere &tm; or newer fully supported devices. + * Requires root user. + * + * This mode determines whether a GPU instance can be created. + * + * This API may unbind or reset the device to activate the requested mode. Thus, the attributes associated with the + * device, such as minor number, might change. The caller of this API is expected to query such attributes again. + * + * On certain platforms like pass-through virtualization, where reset functionality may not be exposed directly, VM + * reboot is required. \a activationStatus would return \ref NVML_ERROR_RESET_REQUIRED for such cases. + * + * \a activationStatus would return the appropriate error code upon unsuccessful activation. For example, if device + * unbind fails because the device isn't idle, \ref NVML_ERROR_IN_USE would be returned. The caller of this API + * is expected to idle the device and retry setting the \a mode. + * + * @note On Windows, only disabling MIG mode is supported. \a activationStatus would return \ref + * NVML_ERROR_NOT_SUPPORTED as GPU reset is not supported on Windows through this API. + * + * @param device The identifier of the target device + * @param mode The mode to be set, \ref NVML_DEVICE_MIG_DISABLE or + * \ref NVML_DEVICE_MIG_ENABLE + * @param activationStatus The activationStatus status + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device,\a mode or \a activationStatus are invalid + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG mode + */ +nvmlReturn_t DECLDIR nvmlDeviceSetMigMode(nvmlDevice_t device, unsigned int mode, nvmlReturn_t *activationStatus); + +/** + * Get MIG mode for the device. 
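+ *
+ * A minimal sketch of reading both modes after a change has been requested with
+ * \ref nvmlDeviceSetMigMode (assuming \a device is a valid device handle; error
+ * handling omitted):
+ * \code
+ * unsigned int currentMode = NVML_DEVICE_MIG_DISABLE;
+ * unsigned int pendingMode = NVML_DEVICE_MIG_DISABLE;
+ * nvmlDeviceGetMigMode(device, &currentMode, &pendingMode);
+ * // A pending mode that differs from the current mode takes effect on the next
+ * // activation trigger.
+ * \endcode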
+ * + * For Ampere &tm; or newer fully supported devices. + * + * Changing MIG modes may require device unbind or reset. The "pending" MIG mode refers to the target mode following the + * next activation trigger. + * + * @param device The identifier of the target device + * @param currentMode Returns the current mode, \ref NVML_DEVICE_MIG_DISABLE or + * \ref NVML_DEVICE_MIG_ENABLE + * @param pendingMode Returns the pending mode, \ref NVML_DEVICE_MIG_DISABLE or + * \ref NVML_DEVICE_MIG_ENABLE + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a currentMode or \a pendingMode are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG mode + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMigMode(nvmlDevice_t device, unsigned int *currentMode, unsigned int *pendingMode); + +/** + * Get GPU instance profile information + * + * Information provided by this API is immutable throughout the lifetime of a MIG mode. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param profile One of the NVML_GPU_INSTANCE_PROFILE_* + * @param info Returns detailed profile information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile or \a info are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG or \a profile isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfo(nvmlDevice_t device, unsigned int profile, + nvmlGpuInstanceProfileInfo_t *info); + +/** + * Versioned wrapper around \ref nvmlDeviceGetGpuInstanceProfileInfo that accepts a versioned + * \ref nvmlGpuInstanceProfileInfo_v2_t or later output structure. + * + * @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the + * appropriate version prior to calling this function. For example: + * \code + * nvmlGpuInstanceProfileInfo_v2_t profileInfo = + * { .version = nvmlGpuInstanceProfileInfo_v2 }; + * nvmlReturn_t result = nvmlDeviceGetGpuInstanceProfileInfoV(device, + * profile, + * &profileInfo); + * \endcode + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param profile One of the NVML_GPU_INSTANCE_PROFILE_* + * @param info Returns detailed profile information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a info, or \a info->version are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profile isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfoV(nvmlDevice_t device, unsigned int profile, + nvmlGpuInstanceProfileInfo_v2_t *info); + +/** + * Get GPU instance placements. + * + * A placement represents the location of a GPU instance within a device. This API only returns all the possible + * placements for the given profile regardless of whether MIG is enabled or not. 
+ * A created GPU instance occupies memory slices described by its placement. Creation of new GPU instance will + * fail if there is overlap with the already occupied memory slices. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * @param device The identifier of the target device + * @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo + * @param placements Returns placements allowed for the profile. Can be NULL to discover number + * of allowed placements for this profile. If non-NULL must be large enough + * to accommodate the placements supported by the profile. + * @param count Returns number of allowed placemenets for the profile. + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId or \a count are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG or \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements_v2(nvmlDevice_t device, unsigned int profileId, + nvmlGpuInstancePlacement_t *placements, + unsigned int *count); + +/** + * Get GPU instance profile capacity. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * @param device The identifier of the target device + * @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo + * @param count Returns remaining instance count for the profile ID + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId or \a count are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceRemainingCapacity(nvmlDevice_t device, unsigned int profileId, + unsigned int *count); + +/** + * Create GPU instance. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would + * become invalid. The GPU instance must be recreated to acquire a valid handle. + * + * @param device The identifier of the target device + * @param profileId The GPU instance profile ID. 
See \ref nvmlDeviceGetGpuInstanceProfileInfo + * @param gpuInstance Returns the GPU instance handle + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a profileId or \a gpuInstance are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created + */ +nvmlReturn_t DECLDIR nvmlDeviceCreateGpuInstance(nvmlDevice_t device, unsigned int profileId, + nvmlGpuInstance_t *gpuInstance); + +/** + * Create GPU instance with the specified placement. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would + * become invalid. The GPU instance must be recreated to acquire a valid handle. + * + * @param device The identifier of the target device + * @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo + * @param placement The requested placement. See \ref nvmlDeviceGetGpuInstancePossiblePlacements_v2 + * @param gpuInstance Returns the GPU instance handle + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a profileId, \a placement or \a gpuInstance + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created + */ +nvmlReturn_t DECLDIR nvmlDeviceCreateGpuInstanceWithPlacement(nvmlDevice_t device, unsigned int profileId, + const nvmlGpuInstancePlacement_t *placement, + nvmlGpuInstance_t *gpuInstance); +/** + * Destroy GPU instance. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * @param gpuInstance The GPU instance handle + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_IN_USE If the GPU instance is in use. This error would be returned if processes + * (e.g. CUDA application) or compute instances are active on the + * GPU instance. + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceDestroy(nvmlGpuInstance_t gpuInstance); + +/** + * Get GPU instances for given profile ID. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * @param device The identifier of the target device + * @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo + * @param gpuInstances Returns pre-exiting GPU instances, the buffer must be large enough to + * accommodate the instances supported by the profile. 
+ * See \ref nvmlDeviceGetGpuInstanceProfileInfo + * @param count The count of returned GPU instances + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId, \a gpuInstances or \a count are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstances(nvmlDevice_t device, unsigned int profileId, + nvmlGpuInstance_t *gpuInstances, unsigned int *count); + +/** + * Get GPU instances for given instance ID. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * @param device The identifier of the target device + * @param id The GPU instance ID + * @param gpuInstance Returns GPU instance + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a id or \a gpuInstance are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_NOT_FOUND If the GPU instance is not found. + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceById(nvmlDevice_t device, unsigned int id, nvmlGpuInstance_t *gpuInstance); + +/** + * Get GPU instance information. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param gpuInstance The GPU instance handle + * @param info Return GPU instance information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance or \a info are invalid + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetInfo(nvmlGpuInstance_t gpuInstance, nvmlGpuInstanceInfo_t *info); + +/** + * Get compute instance profile information. + * + * Information provided by this API is immutable throughout the lifetime of a MIG mode. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profile One of the NVML_COMPUTE_INSTANCE_PROFILE_* + * @param engProfile One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_* + * @param info Returns detailed profile information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a engProfile or \a info are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a profile isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceProfileInfo(nvmlGpuInstance_t gpuInstance, unsigned int profile, + unsigned int engProfile, + nvmlComputeInstanceProfileInfo_t *info); + +/** + * Versioned wrapper around \ref nvmlGpuInstanceGetComputeInstanceProfileInfo that accepts a versioned + * \ref nvmlComputeInstanceProfileInfo_v2_t or later output structure. 
+ * + * @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the + * appropriate version prior to calling this function. For example: + * \code + * nvmlComputeInstanceProfileInfo_v2_t profileInfo = + * { .version = nvmlComputeInstanceProfileInfo_v2 }; + * nvmlReturn_t result = nvmlGpuInstanceGetComputeInstanceProfileInfoV(gpuInstance, + * profile, + * engProfile, + * &profileInfo); + * \endcode + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profile One of the NVML_COMPUTE_INSTANCE_PROFILE_* + * @param engProfile One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_* + * @param info Returns detailed profile information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a engProfile, \a info, or \a info->version are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a profile isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceProfileInfoV(nvmlGpuInstance_t gpuInstance, unsigned int profile, + unsigned int engProfile, + nvmlComputeInstanceProfileInfo_v2_t *info); + +/** + * Get compute instance profile capacity. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profileId The compute instance profile ID. + * See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo + * @param count Returns remaining instance count for the profile ID + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId or \a availableCount are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceRemainingCapacity(nvmlGpuInstance_t gpuInstance, + unsigned int profileId, unsigned int *count); + +/** + * Get compute instance placements. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * A placement represents the location of a compute instance within a GPU instance. This API only returns all the possible + * placements for the given profile. + * A created compute instance occupies compute slices described by its placement. Creation of new compute instance will + * fail if there is overlap with the already occupied compute slices. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profileId The compute instance profile ID. See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo + * @param placements Returns placements allowed for the profile. Can be NULL to discover number + * of allowed placements for this profile. If non-NULL must be large enough + * to accommodate the placements supported by the profile. + * @param count Returns number of allowed placemenets for the profile. 
+ * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId or \a count are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstancePossiblePlacements(nvmlGpuInstance_t gpuInstance, + unsigned int profileId, + nvmlComputeInstancePlacement_t *placements, + unsigned int *count); + +/** + * Create compute instance. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * If the parent device is unbound, reset or the parent GPU instance is destroyed or the compute instance is destroyed + * explicitly, the compute instance handle would become invalid. The compute instance must be recreated to acquire + * a valid handle. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profileId The compute instance profile ID. + * See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo + * @param computeInstance Returns the compute instance handle + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a profileId or \a computeInstance + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested compute instance could not be created + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceCreateComputeInstance(nvmlGpuInstance_t gpuInstance, unsigned int profileId, + nvmlComputeInstance_t *computeInstance); + +/** + * Create compute instance with the specified placement. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * If the parent device is unbound, reset or the parent GPU instance is destroyed or the compute instance is destroyed + * explicitly, the compute instance handle would become invalid. The compute instance must be recreated to acquire + * a valid handle. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profileId The compute instance profile ID. + * See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo + * @param placement The requested placement. 
See \ref nvmlGpuInstanceGetComputeInstancePossiblePlacements + * @param computeInstance Returns the compute instance handle + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a profileId or \a computeInstance + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested compute instance could not be created + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceCreateComputeInstanceWithPlacement(nvmlGpuInstance_t gpuInstance, unsigned int profileId, + const nvmlComputeInstancePlacement_t *placement, + nvmlComputeInstance_t *computeInstance); + +/** + * Destroy compute instance. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * @param computeInstance The compute instance handle + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a computeInstance is invalid + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_IN_USE If the compute instance is in use. This error would be returned if + * processes (e.g. CUDA application) are active on the compute instance. + */ +nvmlReturn_t DECLDIR nvmlComputeInstanceDestroy(nvmlComputeInstance_t computeInstance); + +/** + * Get compute instances for given profile ID. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profileId The compute instance profile ID. + * See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo + * @param computeInstances Returns pre-exiting compute instances, the buffer must be large enough to + * accommodate the instances supported by the profile. + * See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo + * @param count The count of returned compute instances + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId, \a computeInstances or \a count + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstances(nvmlGpuInstance_t gpuInstance, unsigned int profileId, + nvmlComputeInstance_t *computeInstances, unsigned int *count); + +/** + * Get compute instance for given instance ID. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. 
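+ *
+ * A minimal sketch of looking up a compute instance by ID and reading its information
+ * (assuming \a gpuInstance is a valid GPU instance handle and \a id is a known compute
+ * instance ID; error handling omitted):
+ * \code
+ * nvmlComputeInstance_t computeInstance;
+ * nvmlComputeInstanceInfo_t ciInfo;
+ * nvmlGpuInstanceGetComputeInstanceById(gpuInstance, id, &computeInstance);
+ * // ciInfo.profileId identifies the profile and ciInfo.placement describes the
+ * // compute slices the instance occupies.
+ * nvmlComputeInstanceGetInfo_v2(computeInstance, &ciInfo);
+ * \endcode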
+ * + * @param gpuInstance The identifier of the target GPU instance + * @param id The compute instance ID + * @param computeInstance Returns compute instance + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a ID or \a computeInstance are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_NOT_FOUND If the compute instance is not found. + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance_t gpuInstance, unsigned int id, + nvmlComputeInstance_t *computeInstance); + +/** + * Get compute instance information. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param computeInstance The compute instance handle + * @param info Return compute instance information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a computeInstance or \a info are invalid + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlComputeInstanceGetInfo_v2(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t *info); + +/** + * Test if the given handle refers to a MIG device. + * + * A MIG device handle is an NVML abstraction which maps to a MIG compute instance. + * These overloaded references can be used (with some restrictions) interchangeably + * with a GPU device handle to execute queries at a per-compute instance granularity. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device NVML handle to test + * @param isMigDevice True when handle refers to a MIG device + * + * @return + * - \ref NVML_SUCCESS if \a device status was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle or \a isMigDevice reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceIsMigDeviceHandle(nvmlDevice_t device, unsigned int *isMigDevice); + +/** + * Get GPU instance ID for the given MIG device handle. + * + * GPU instance IDs are unique per device and remain valid until the GPU instance is destroyed. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device Target MIG device handle + * @param id GPU instance ID + * + * @return + * - \ref NVML_SUCCESS if instance ID was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceId(nvmlDevice_t device, unsigned int *id); + +/** + * Get compute instance ID for the given MIG device handle. + * + * Compute instance IDs are unique per GPU instance and remain valid until the compute instance + * is destroyed. 
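+ *
+ * A minimal sketch of reading both instance IDs from a handle after confirming it is a
+ * MIG device handle (assuming \a device was obtained elsewhere; error handling omitted):
+ * \code
+ * unsigned int isMigDevice = 0;
+ * nvmlDeviceIsMigDeviceHandle(device, &isMigDevice);
+ * if (isMigDevice)
+ * {
+ *     unsigned int gpuInstanceId = 0;
+ *     unsigned int computeInstanceId = 0;
+ *     nvmlDeviceGetGpuInstanceId(device, &gpuInstanceId);
+ *     nvmlDeviceGetComputeInstanceId(device, &computeInstanceId);
+ * }
+ * \endcode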
+ * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device Target MIG device handle + * @param id Compute instance ID + * + * @return + * - \ref NVML_SUCCESS if instance ID was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned int *id); + +/** + * Get the maximum number of MIG devices that can exist under a given parent NVML device. + * + * Returns zero if MIG is not supported or enabled. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device Target device handle + * @param count Count of MIG devices + * + * @return + * - \ref NVML_SUCCESS if \a count was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a count reference is invalid + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int *count); + +/** + * Get MIG device handle for the given index under its parent NVML device. + * + * If the compute instance is destroyed either explicitly or by destroying, + * resetting or unbinding the parent GPU instance or the GPU device itself + * the MIG device handle would remain invalid and must be requested again + * using this API. Handles may be reused and their properties can change in + * the process. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device Reference to the parent GPU device handle + * @param index Index of the MIG device + * @param migDevice Reference to the MIG device handle + * + * @return + * - \ref NVML_SUCCESS if \a migDevice handle was successfully created + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a index or \a migDevice reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_NOT_FOUND if no valid MIG device was found at \a index + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index, + nvmlDevice_t *migDevice); + +/** + * Get parent device handle from a MIG device handle. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. 
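+ *
+ * A minimal sketch of walking the MIG devices under a parent handle and mapping each one
+ * back to its parent (assuming \a device is the parent handle; error handling omitted):
+ * \code
+ * unsigned int maxCount = 0;
+ * nvmlDeviceGetMaxMigDeviceCount(device, &maxCount);
+ * for (unsigned int i = 0; i < maxCount; i++)
+ * {
+ *     nvmlDevice_t migDevice;
+ *     nvmlDevice_t parent;
+ *     // Indices with no valid MIG device return NVML_ERROR_NOT_FOUND and are skipped.
+ *     if (nvmlDeviceGetMigDeviceHandleByIndex(device, i, &migDevice) != NVML_SUCCESS)
+ *         continue;
+ *     nvmlDeviceGetDeviceHandleFromMigDeviceHandle(migDevice, &parent);
+ * }
+ * \endcode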
+ * + * @param migDevice MIG device handle + * @param device Device handle + * + * @return + * - \ref NVML_SUCCESS if \a device handle was successfully created + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a migDevice or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t *device); + +/** @} */ // @defgroup nvmlMultiInstanceGPU + + +/***************************************************************************************************/ +/** @defgroup GPM NVML GPM + * @{ + */ +/***************************************************************************************************/ +/** @defgroup nvmlGpmEnums GPM Enums + * @{ + */ +/***************************************************************************************************/ + +/** + * GPM Metric Identifiers + */ +typedef enum +{ + NVML_GPM_METRIC_GRAPHICS_UTIL = 1, //!< Percentage of time any compute/graphics app was active on the GPU. 0.0 - 100.0 + NVML_GPM_METRIC_SM_UTIL = 2, //!< Percentage of SMs that were busy. 0.0 - 100.0 + NVML_GPM_METRIC_SM_OCCUPANCY = 3, //!< Percentage of warps that were active vs theoretical maximum. 0.0 - 100.0 + NVML_GPM_METRIC_INTEGER_UTIL = 4, //!< Percentage of time the GPU's SMs were doing integer operations. 0.0 - 100.0 + NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5, //!< Percentage of time the GPU's SMs were doing ANY tensor operations. 0.0 - 100.0 + NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6, //!< Percentage of time the GPU's SMs were doing DFMA tensor operations. 0.0 - 100.0 + NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7, //!< Percentage of time the GPU's SMs were doing HMMA tensor operations. 0.0 - 100.0 + NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9, //!< Percentage of time the GPU's SMs were doing IMMA tensor operations. 0.0 - 100.0 + NVML_GPM_METRIC_DRAM_BW_UTIL = 10, //!< Percentage of DRAM bw used vs theoretical maximum. 0.0 - 100.0 */ + NVML_GPM_METRIC_FP64_UTIL = 11, //!< Percentage of time the GPU's SMs were doing non-tensor FP64 math. 0.0 - 100.0 + NVML_GPM_METRIC_FP32_UTIL = 12, //!< Percentage of time the GPU's SMs were doing non-tensor FP32 math. 0.0 - 100.0 + NVML_GPM_METRIC_FP16_UTIL = 13, //!< Percentage of time the GPU's SMs were doing non-tensor FP16 math. 0.0 - 100.0 + NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20, //!< PCIe traffic from this GPU in MiB/sec + NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21, //!< PCIe traffic to this GPU in MiB/sec + NVML_GPM_METRIC_NVDEC_0_UTIL = 30, //!< Percent utilization of NVDEC 0. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_1_UTIL = 31, //!< Percent utilization of NVDEC 1. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_2_UTIL = 32, //!< Percent utilization of NVDEC 2. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_3_UTIL = 33, //!< Percent utilization of NVDEC 3. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_4_UTIL = 34, //!< Percent utilization of NVDEC 4. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_5_UTIL = 35, //!< Percent utilization of NVDEC 5. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_6_UTIL = 36, //!< Percent utilization of NVDEC 6. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_7_UTIL = 37, //!< Percent utilization of NVDEC 7. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_0_UTIL = 40, //!< Percent utilization of NVJPG 0. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_1_UTIL = 41, //!< Percent utilization of NVJPG 1. 
0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_2_UTIL = 42, //!< Percent utilization of NVJPG 2. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_3_UTIL = 43, //!< Percent utilization of NVJPG 3. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_4_UTIL = 44, //!< Percent utilization of NVJPG 4. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_5_UTIL = 45, //!< Percent utilization of NVJPG 5. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_6_UTIL = 46, //!< Percent utilization of NVJPG 6. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_7_UTIL = 47, //!< Percent utilization of NVJPG 7. 0.0 - 100.0 + NVML_GPM_METRIC_NVOFA_0_UTIL = 50, //!< Percent utilization of NVOFA 0. 0.0 - 100.0 + NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60, //!< NvLink read bandwidth for all links in MiB/sec + NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61, //!< NvLink write bandwidth for all links in MiB/sec + NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62, //!< NvLink read bandwidth for link 0 in MiB/sec + NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63, //!< NvLink write bandwidth for link 0 in MiB/sec + NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64, //!< NvLink read bandwidth for link 1 in MiB/sec + NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65, //!< NvLink write bandwidth for link 1 in MiB/sec + NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66, //!< NvLink read bandwidth for link 2 in MiB/sec + NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67, //!< NvLink write bandwidth for link 2 in MiB/sec + NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68, //!< NvLink read bandwidth for link 3 in MiB/sec + NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69, //!< NvLink write bandwidth for link 3 in MiB/sec + NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70, //!< NvLink read bandwidth for link 4 in MiB/sec + NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71, //!< NvLink write bandwidth for link 4 in MiB/sec + NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72, //!< NvLink read bandwidth for link 5 in MiB/sec + NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73, //!< NvLink write bandwidth for link 5 in MiB/sec + NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74, //!< NvLink read bandwidth for link 6 in MiB/sec + NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75, //!< NvLink write bandwidth for link 6 in MiB/sec + NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76, //!< NvLink read bandwidth for link 7 in MiB/sec + NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77, //!< NvLink write bandwidth for link 7 in MiB/sec + NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78, //!< NvLink read bandwidth for link 8 in MiB/sec + NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79, //!< NvLink write bandwidth for link 8 in MiB/sec + NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80, //!< NvLink read bandwidth for link 9 in MiB/sec + NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81, //!< NvLink write bandwidth for link 9 in MiB/sec + NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82, //!< NvLink read bandwidth for link 10 in MiB/sec + NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83, //!< NvLink write bandwidth for link 10 in MiB/sec + NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84, //!< NvLink read bandwidth for link 11 in MiB/sec + NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85, //!< NvLink write bandwidth for link 11 in MiB/sec + NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86, //!< NvLink read bandwidth for link 12 in MiB/sec + NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87, //!< NvLink write bandwidth for link 12 in MiB/sec + NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88, //!< NvLink read bandwidth for link 13 in MiB/sec + NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89, //!< NvLink write bandwidth for link 13 in MiB/sec + NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90, //!< NvLink read bandwidth for 
link 14 in MiB/sec + NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91, //!< NvLink write bandwidth for link 14 in MiB/sec + NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92, //!< NvLink read bandwidth for link 15 in MiB/sec + NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93, //!< NvLink write bandwidth for link 15 in MiB/sec + NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94, //!< NvLink read bandwidth for link 16 in MiB/sec + NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95, //!< NvLink write bandwidth for link 16 in MiB/sec + NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96, //!< NvLink read bandwidth for link 17 in MiB/sec + NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97, //!< NvLink write bandwidth for link 17 in MiB/sec + NVML_GPM_METRIC_MAX = 98, //!< Maximum value above +1. Note that changing this should also change NVML_GPM_METRICS_GET_VERSION due to struct size change +} nvmlGpmMetricId_t; + +/** @} */ // @defgroup nvmlGpmEnums + + +/***************************************************************************************************/ +/** @defgroup nvmlGpmStructs GPM Structs + * @{ + */ +/***************************************************************************************************/ + +/** + * Handle to an allocated GPM sample allocated with nvmlGpmSampleAlloc(). Free this with nvmlGpmSampleFree(). + */ +typedef struct +{ + struct nvmlGpmSample_st* handle; +} nvmlGpmSample_t; + +typedef struct { + char *shortName; + char *longName; + char *unit; +} nvmlGpmMetricMetricInfo_t; + +/** + * GPM metric information. + */ +typedef struct +{ + unsigned int metricId; //!< IN: NVML_GPM_METRIC_? #define of which metric to retrieve + nvmlReturn_t nvmlReturn; //!< OUT: Status of this metric. If this is nonzero, then value is not valid + double value; //!< OUT: Value of this metric. Is only valid if nvmlReturn is 0 (NVML_SUCCESS) + nvmlGpmMetricMetricInfo_t metricInfo; //!< OUT: Metric name and unit. Those can be NULL if not defined +} nvmlGpmMetric_t; + +/** + * GPM buffer information. + */ +typedef struct +{ + unsigned int version; //!< IN: Set to NVML_GPM_METRICS_GET_VERSION + unsigned int numMetrics; //!< IN: How many metrics to retrieve in metrics[] + nvmlGpmSample_t sample1; //!< IN: Sample buffer + nvmlGpmSample_t sample2; //!< IN: Sample buffer + nvmlGpmMetric_t metrics[NVML_GPM_METRIC_MAX]; //!< IN/OUT: Array of metrics. Set metricId on call. See nvmlReturn and value on return +} nvmlGpmMetricsGet_t; + +#define NVML_GPM_METRICS_GET_VERSION 1 + +/** + * GPM device information. + */ +typedef struct +{ + unsigned int version; //!< IN: Set to NVML_GPM_SUPPORT_VERSION + unsigned int isSupportedDevice; //!< OUT: Indicates device support +} nvmlGpmSupport_t; + +#define NVML_GPM_SUPPORT_VERSION 1 + +/** @} */ // @defgroup nvmlGPMStructs + +/***************************************************************************************************/ +/** @defgroup nvmlGpmFunctions GPM Functions + * @{ + */ +/***************************************************************************************************/ + +/** + * Calculate GPM metrics from two samples. + * + * For Hopper &tm; or newer fully supported devices. + * + * @param metricsGet IN/OUT: populated \a nvmlGpmMetricsGet_t struct + * + * @return + * - \ref NVML_SUCCESS on success + * - Nonzero NVML_ERROR_? enum on error + */ +nvmlReturn_t DECLDIR nvmlGpmMetricsGet(nvmlGpmMetricsGet_t *metricsGet); + + +/** + * Free an allocated sample buffer that was allocated with \ref nvmlGpmSampleAlloc() + * + * For Hopper &tm; or newer fully supported devices. 
+ * + * @param gpmSample Sample to free + * + * @return + * - \ref NVML_SUCCESS on success + * - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided + */ +nvmlReturn_t DECLDIR nvmlGpmSampleFree(nvmlGpmSample_t gpmSample); + + +/** + * Allocate a sample buffer to be used with NVML GPM . You will need to allocate + * at least two of these buffers to use with the NVML GPM feature + * + * For Hopper &tm; or newer fully supported devices. + * + * @param gpmSample Where the allocated sample will be stored + * + * @return + * - \ref NVML_SUCCESS on success + * - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided + * - \ref NVML_ERROR_MEMORY if system memory is insufficient + */ +nvmlReturn_t DECLDIR nvmlGpmSampleAlloc(nvmlGpmSample_t *gpmSample); + +/** + * Read a sample of GPM metrics into the provided \a gpmSample buffer. After + * two samples are gathered, you can call nvmlGpmMetricGet on those samples to + * retrive metrics + * + * For Hopper &tm; or newer fully supported devices. + * + * @param device Device to get samples for + * @param gpmSample Buffer to read samples into + * + * @return + * - \ref NVML_SUCCESS on success + * - Nonzero NVML_ERROR_? enum on error + */ +nvmlReturn_t DECLDIR nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample); + +/** + * Read a sample of GPM metrics into the provided \a gpmSample buffer for a MIG GPU Instance. + * + * After two samples are gathered, you can call nvmlGpmMetricGet on those + * samples to retrive metrics + * + * For Hopper &tm; or newer fully supported devices. + * + * @param device Device to get samples for + * @param gpuInstanceId MIG GPU Instance ID + * @param gpmSample Buffer to read samples into + * + * @return + * - \ref NVML_SUCCESS on success + * - Nonzero NVML_ERROR_? enum on error + */ +nvmlReturn_t DECLDIR nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample); + +/** + * Indicate whether the supplied device supports GPM + * + * @param device NVML device to query for + * @param gpmSupport Structure to indicate GPM support \a nvmlGpmSupport_t. Indicates + * GPM support per system for the supplied device + * + * @return + * - NVML_SUCCESS on success + * - Nonzero NVML_ERROR_? enum if there is an error in processing the query + */ +nvmlReturn_t DECLDIR nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t *gpmSupport); + +/* GPM Stream State */ +/** + * Get GPM stream state. + * + * %HOPPER_OR_NEWER% + * Supported on Linux, Windows TCC. + * + * @param device The identifier of the target device + * @param state Returns GPM stream state + * NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED + * + * @return + * - \ref NVML_SUCCESS if \a current GPM stream state were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a state is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int *state); + +/** + * Set GPM stream state. + * + * %HOPPER_OR_NEWER% + * Supported on Linux, Windows TCC. 
+ * + * @param device The identifier of the target device + * @param state GPM stream state, + * NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED + * + * @return + * - \ref NVML_SUCCESS if \a current GPM stream state is successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state); + +/** @} */ // @defgroup nvmlGpmFunctions +/** @} */ // @defgroup GPM + +#define NVML_NVLINK_POWER_STATE_HIGH_SPEED 0x0 +#define NVML_NVLINK_POWER_STATE_LOW 0x1 + +#define NVML_NVLINK_LOW_POWER_THRESHOLD_MIN 0x1 +#define NVML_NVLINK_LOW_POWER_THRESHOLD_MAX 0x1FFF +#define NVML_NVLINK_LOW_POWER_THRESHOLD_RESET 0xFFFFFFFF + +/* Structure containing Low Power parameters */ +typedef struct nvmlNvLinkPowerThres_st +{ + unsigned int lowPwrThreshold; //!< Low power threshold (in units of 100us) +} nvmlNvLinkPowerThres_t; + +/** + * Set NvLink Low Power Threshold for device. + * + * %HOPPER_OR_NEWER% + * + * @param device The identifier of the target device + * @param info Reference to \a nvmlNvLinkPowerThres_t struct + * input parameters + * + * @return + * - \ref NVML_SUCCESS if the \a Threshold is successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a Threshold is not within range + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * + **/ +nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice_t device, nvmlNvLinkPowerThres_t *info); + +/** + * Set the global nvlink bandwith mode + * + * @param nvlinkBwMode nvlink bandwidth mode + * @return + * - \ref NVML_SUCCESS on success + * - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid argument is provided + * - \ref NVML_ERROR_IN_USE if P2P object exists + * - \ref NVML_ERROR_NOT_SUPPORTED if GPU is not Hopper or newer architecture. + * - \ref NVML_ERROR_NO_PERMISSION if not root user + */ +nvmlReturn_t DECLDIR nvmlSystemSetNvlinkBwMode(unsigned int nvlinkBwMode); + +/** + * Get the global nvlink bandwith mode + * + * @param nvlinkBwMode reference of nvlink bandwidth mode + * @return + * - \ref NVML_SUCCESS on success + * - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided + * - \ref NVML_ERROR_NOT_SUPPORTED if GPU is not Hopper or newer architecture. + * - \ref NVML_ERROR_NO_PERMISSION if not root user + */ +nvmlReturn_t DECLDIR nvmlSystemGetNvlinkBwMode(unsigned int *nvlinkBwMode); + +/** + * Set new power limit of this device. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. + * + * See \ref nvmlPowerValue_v2_t for more information on the struct. + * + * \note Limit is not persistent across reboots or driver unloads. + * Enable persistent mode to prevent driver from unloading when no application is using the device. + * + * This API replaces nvmlDeviceSetPowerManagementLimit. It can be used as a drop-in replacement for the older version. 
+ * + * @param device The identifier of the target device + * @param powerValue Power management limit in milliwatts to set + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a powerValue is NULL or contains invalid values + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see NVML_FI_DEV_POWER_AVERAGE + * @see NVML_FI_DEV_POWER_INSTANT + * @see NVML_FI_DEV_POWER_MIN_LIMIT + * @see NVML_FI_DEV_POWER_MAX_LIMIT + * @see NVML_FI_DEV_POWER_CURRENT_LIMIT + */ +nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit_v2(nvmlDevice_t device, nvmlPowerValue_v2_t *powerValue); + +/** + * Get SRAM ECC error status of this device. + * + * For Ampere &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlEccSramErrorStatus_v1_t for more information on the struct. + * + * @param device The identifier of the target device + * @param status Returns SRAM ECC error status + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counters is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_VERSION_MISMATCH if the version of \a nvmlEccSramErrorStatus_t is invalid + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSramEccErrorStatus(nvmlDevice_t device, + nvmlEccSramErrorStatus_t *status); +/** + * NVML API versioning support + */ + +#ifdef NVML_NO_UNVERSIONED_FUNC_DEFS +nvmlReturn_t DECLDIR nvmlInit(void); +nvmlReturn_t DECLDIR nvmlDeviceGetCount(unsigned int *deviceCount); +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex(unsigned int index, nvmlDevice_t *device); +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByPciBusId(const char *pciBusId, nvmlDevice_t *device); +nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo(nvmlDevice_t device, nvmlPciInfo_t *pci); +nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo_v2(nvmlDevice_t device, nvmlPciInfo_t *pci); +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci); +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v2(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v3(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); +nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu(nvmlPciInfo_t *pciInfo); +nvmlReturn_t DECLDIR nvmlEventSetWait(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); +nvmlReturn_t DECLDIR nvmlDeviceGetAttributes(nvmlDevice_t device, nvmlDeviceAttributes_t *attributes); +nvmlReturn_t DECLDIR nvmlComputeInstanceGetInfo(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t *info); +nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, 
nvmlProcessInfo_v1_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v1_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v1_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements(nvmlDevice_t device, unsigned int profileId, nvmlGpuInstancePlacement_t *placements, unsigned int *count); +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseInfo(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuLicenseInfo_t *licenseInfo); +#endif // #ifdef NVML_NO_UNVERSIONED_FUNC_DEFS + +#if defined(NVML_NO_UNVERSIONED_FUNC_DEFS) +// We don't define APIs to run new versions if this guard is present so there is +// no need to undef +#elif defined(__NVML_API_VERSION_INTERNAL) +#undef nvmlDeviceGetGraphicsRunningProcesses +#undef nvmlDeviceGetComputeRunningProcesses +#undef nvmlDeviceGetMPSComputeRunningProcesses +#undef nvmlDeviceGetAttributes +#undef nvmlComputeInstanceGetInfo +#undef nvmlEventSetWait +#undef nvmlDeviceGetGridLicensableFeatures +#undef nvmlDeviceRemoveGpu +#undef nvmlDeviceGetNvLinkRemotePciInfo +#undef nvmlDeviceGetPciInfo +#undef nvmlDeviceGetCount +#undef nvmlDeviceGetHandleByIndex +#undef nvmlDeviceGetHandleByPciBusId +#undef nvmlInit +#undef nvmlBlacklistDeviceInfo_t +#undef nvmlGetBlacklistDeviceCount +#undef nvmlGetBlacklistDeviceInfoByIndex +#undef nvmlDeviceGetGpuInstancePossiblePlacements +#undef nvmlVgpuInstanceGetLicenseInfo +#undef nvmlDeviceSetPowerManagementLimit + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/refcount.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/refcount.go new file mode 100644 index 00000000000..4d1e212ea4a --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/refcount.go @@ -0,0 +1,31 @@ +/** +# Copyright 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package nvml + +type refcount int + +func (r *refcount) IncOnNoError(err error) { + if err == nil { + (*r)++ + } +} + +func (r *refcount) DecOnNoError(err error) { + if err == nil && (*r) > 0 { + (*r)-- + } +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go new file mode 100644 index 00000000000..1aec3ecc304 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go @@ -0,0 +1,103 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
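The GPM section of the header above spells out a two-sample workflow: allocate at least two sample buffers, take two samples some interval apart, then derive metrics from the pair. Below is a minimal sketch of that flow through the Go bindings vendored by this change; the nvml.Gpm* wrappers, GpmMetricsGetType and the GPM_METRIC_* constants are the names I expect this module to export, so treat them as assumptions rather than a verified API surface.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("init: %v", ret)
	}
	defer nvml.Shutdown()

	device, ret := nvml.DeviceGetHandleByIndex(0)
	if ret != nvml.SUCCESS {
		log.Fatalf("device 0: %v", ret)
	}

	// The header requires at least two sample buffers.
	sample1, ret := nvml.GpmSampleAlloc()
	if ret != nvml.SUCCESS {
		log.Fatalf("alloc sample 1: %v", ret)
	}
	defer nvml.GpmSampleFree(sample1)

	sample2, ret := nvml.GpmSampleAlloc()
	if ret != nvml.SUCCESS {
		log.Fatalf("alloc sample 2: %v", ret)
	}
	defer nvml.GpmSampleFree(sample2)

	// Take two samples a short interval apart.
	if ret := nvml.GpmSampleGet(device, sample1); ret != nvml.SUCCESS {
		log.Fatalf("sample 1: %v", ret)
	}
	time.Sleep(time.Second)
	if ret := nvml.GpmSampleGet(device, sample2); ret != nvml.SUCCESS {
		log.Fatalf("sample 2: %v", ret)
	}

	// Ask for SM utilization over the sampled interval (assumed field and
	// constant names, mirroring nvmlGpmMetricsGet_t above).
	metrics := nvml.GpmMetricsGetType{
		Version:    nvml.GPM_METRICS_GET_VERSION,
		NumMetrics: 1,
		Sample1:    sample1,
		Sample2:    sample2,
	}
	metrics.Metrics[0].MetricId = uint32(nvml.GPM_METRIC_SM_UTIL)
	if ret := nvml.GpmMetricsGet(&metrics); ret != nvml.SUCCESS {
		log.Fatalf("metrics: %v", ret)
	}
	fmt.Printf("SM utilization: %.1f%%\n", metrics.Metrics[0].Value)
}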
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import ( + "fmt" +) + +// nvml.ErrorString() +func (l *library) ErrorString(r Return) string { + return r.Error() +} + +// String returns the string representation of a Return. +func (r Return) String() string { + return r.Error() +} + +// Error returns the string representation of a Return. +func (r Return) Error() string { + return errorStringFunc(r) +} + +// Assigned to nvml.ErrorString if the system nvml library is in use. +var errorStringFunc = defaultErrorStringFunc + +// defaultErrorStringFunc provides a basic nvmlErrorString implementation. +// This allows the nvml.ErrorString function to be used even if the NVML library +// is not loaded. +var defaultErrorStringFunc = func(r Return) string { + switch r { + case SUCCESS: + return "SUCCESS" + case ERROR_UNINITIALIZED: + return "ERROR_UNINITIALIZED" + case ERROR_INVALID_ARGUMENT: + return "ERROR_INVALID_ARGUMENT" + case ERROR_NOT_SUPPORTED: + return "ERROR_NOT_SUPPORTED" + case ERROR_NO_PERMISSION: + return "ERROR_NO_PERMISSION" + case ERROR_ALREADY_INITIALIZED: + return "ERROR_ALREADY_INITIALIZED" + case ERROR_NOT_FOUND: + return "ERROR_NOT_FOUND" + case ERROR_INSUFFICIENT_SIZE: + return "ERROR_INSUFFICIENT_SIZE" + case ERROR_INSUFFICIENT_POWER: + return "ERROR_INSUFFICIENT_POWER" + case ERROR_DRIVER_NOT_LOADED: + return "ERROR_DRIVER_NOT_LOADED" + case ERROR_TIMEOUT: + return "ERROR_TIMEOUT" + case ERROR_IRQ_ISSUE: + return "ERROR_IRQ_ISSUE" + case ERROR_LIBRARY_NOT_FOUND: + return "ERROR_LIBRARY_NOT_FOUND" + case ERROR_FUNCTION_NOT_FOUND: + return "ERROR_FUNCTION_NOT_FOUND" + case ERROR_CORRUPTED_INFOROM: + return "ERROR_CORRUPTED_INFOROM" + case ERROR_GPU_IS_LOST: + return "ERROR_GPU_IS_LOST" + case ERROR_RESET_REQUIRED: + return "ERROR_RESET_REQUIRED" + case ERROR_OPERATING_SYSTEM: + return "ERROR_OPERATING_SYSTEM" + case ERROR_LIB_RM_VERSION_MISMATCH: + return "ERROR_LIB_RM_VERSION_MISMATCH" + case ERROR_IN_USE: + return "ERROR_IN_USE" + case ERROR_MEMORY: + return "ERROR_MEMORY" + case ERROR_NO_DATA: + return "ERROR_NO_DATA" + case ERROR_VGPU_ECC_NOT_SUPPORTED: + return "ERROR_VGPU_ECC_NOT_SUPPORTED" + case ERROR_INSUFFICIENT_RESOURCES: + return "ERROR_INSUFFICIENT_RESOURCES" + case ERROR_FREQ_NOT_SUPPORTED: + return "ERROR_FREQ_NOT_SUPPORTED" + case ERROR_ARGUMENT_VERSION_MISMATCH: + return "ERROR_ARGUMENT_VERSION_MISMATCH" + case ERROR_DEPRECATED: + return "ERROR_DEPRECATED" + case ERROR_UNKNOWN: + return "ERROR_UNKNOWN" + default: + return fmt.Sprintf("unknown return value: %d", r) + } +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go new file mode 100644 index 00000000000..bee3964152c --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go @@ -0,0 +1,138 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
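Callers normally never touch errorStringFunc directly; they compare the Return value against SUCCESS and let Error()/String() do the formatting. A small usage sketch against the package-level wrappers (nvml.Init, nvml.DeviceGetCount, nvml.ErrorString), assuming they are exported unchanged by this vendored module:

package main

import (
	"fmt"
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	// Return stringifies even when libnvidia-ml.so is missing, because the
	// package falls back to defaultErrorStringFunc above.
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("NVML init failed: %v", nvml.ErrorString(ret))
	}
	defer nvml.Shutdown()

	count, ret := nvml.DeviceGetCount()
	if ret != nvml.SUCCESS {
		log.Fatalf("device count failed: %v", ret) // %v picks up Return.Error()
	}
	fmt.Println("GPUs visible to NVML:", count)
}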
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +// nvml.SystemGetDriverVersion() +func (l *library) SystemGetDriverVersion() (string, Return) { + Version := make([]byte, SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + ret := nvmlSystemGetDriverVersion(&Version[0], SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +// nvml.SystemGetNVMLVersion() +func (l *library) SystemGetNVMLVersion() (string, Return) { + Version := make([]byte, SYSTEM_NVML_VERSION_BUFFER_SIZE) + ret := nvmlSystemGetNVMLVersion(&Version[0], SYSTEM_NVML_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +// nvml.SystemGetCudaDriverVersion() +func (l *library) SystemGetCudaDriverVersion() (int, Return) { + var CudaDriverVersion int32 + ret := nvmlSystemGetCudaDriverVersion(&CudaDriverVersion) + return int(CudaDriverVersion), ret +} + +// nvml.SystemGetCudaDriverVersion_v2() +func (l *library) SystemGetCudaDriverVersion_v2() (int, Return) { + var CudaDriverVersion int32 + ret := nvmlSystemGetCudaDriverVersion_v2(&CudaDriverVersion) + return int(CudaDriverVersion), ret +} + +// nvml.SystemGetProcessName() +func (l *library) SystemGetProcessName(pid int) (string, Return) { + name := make([]byte, SYSTEM_PROCESS_NAME_BUFFER_SIZE) + ret := nvmlSystemGetProcessName(uint32(pid), &name[0], SYSTEM_PROCESS_NAME_BUFFER_SIZE) + return string(name[:clen(name)]), ret +} + +// nvml.SystemGetHicVersion() +func (l *library) SystemGetHicVersion() ([]HwbcEntry, Return) { + var hwbcCount uint32 = 1 // Will be reduced upon returning + for { + hwbcEntries := make([]HwbcEntry, hwbcCount) + ret := nvmlSystemGetHicVersion(&hwbcCount, &hwbcEntries[0]) + if ret == SUCCESS { + return hwbcEntries[:hwbcCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + hwbcCount *= 2 + } +} + +// nvml.SystemGetTopologyGpuSet() +func (l *library) SystemGetTopologyGpuSet(cpuNumber int) ([]Device, Return) { + var count uint32 + ret := nvmlSystemGetTopologyGpuSet(uint32(cpuNumber), &count, nil) + if ret != SUCCESS { + return nil, ret + } + if count == 0 { + return []Device{}, ret + } + deviceArray := make([]nvmlDevice, count) + ret = nvmlSystemGetTopologyGpuSet(uint32(cpuNumber), &count, &deviceArray[0]) + return convertSlice[nvmlDevice, Device](deviceArray), ret +} + +// nvml.SystemGetConfComputeCapabilities() +func (l *library) SystemGetConfComputeCapabilities() (ConfComputeSystemCaps, Return) { + var capabilities ConfComputeSystemCaps + ret := nvmlSystemGetConfComputeCapabilities(&capabilities) + return capabilities, ret +} + +// nvml.SystemGetConfComputeState() +func SystemGetConfComputeState() (ConfComputeSystemState, Return) { + var state ConfComputeSystemState + ret := nvmlSystemGetConfComputeState(&state) + return state, ret +} + +// nvml.SystemGetConfComputeGpusReadyState() +func SystemGetConfComputeGpusReadyState() (uint32, Return) { + var isAcceptingWork uint32 + ret := nvmlSystemGetConfComputeGpusReadyState(&isAcceptingWork) + return isAcceptingWork, ret +} + +// 
nvml.SystemSetConfComputeGpusReadyState() +func SystemSetConfComputeGpusReadyState(isAcceptingWork uint32) Return { + return nvmlSystemSetConfComputeGpusReadyState(isAcceptingWork) +} + +// nvml.SystemSetNvlinkBwMode() +func SystemSetNvlinkBwMode(nvlinkBwMode uint32) Return { + return nvmlSystemSetNvlinkBwMode(nvlinkBwMode) +} + +// nvml.SystemGetNvlinkBwMode() +func SystemGetNvlinkBwMode() (uint32, Return) { + var nvlinkBwMode uint32 + ret := nvmlSystemGetNvlinkBwMode(&nvlinkBwMode) + return nvlinkBwMode, ret +} + +// nvml.SystemGetConfComputeKeyRotationThresholdInfo() +func (l *library) SystemGetConfComputeKeyRotationThresholdInfo() (ConfComputeGetKeyRotationThresholdInfo, Return) { + var keyRotationThresholdInfo ConfComputeGetKeyRotationThresholdInfo + ret := nvmlSystemGetConfComputeKeyRotationThresholdInfo(&keyRotationThresholdInfo) + return keyRotationThresholdInfo, ret +} + +// nvml.SystemGetConfComputeSettings() +func (l *library) SystemGetConfComputeSettings() (SystemConfComputeSettings, Return) { + var settings SystemConfComputeSettings + ret := nvmlSystemGetConfComputeSettings(&settings) + return settings, ret +} + +// nvml.SystemSetConfComputeKeyRotationThresholdInfo() +func (l *library) SystemSetConfComputeKeyRotationThresholdInfo(keyRotationThresholdInfo ConfComputeSetKeyRotationThresholdInfo) Return { + return nvmlSystemSetConfComputeKeyRotationThresholdInfo(&keyRotationThresholdInfo) +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go new file mode 100644 index 00000000000..6ee33a6aba8 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go @@ -0,0 +1,908 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types.go + +package nvml + +import "unsafe" + +type nvmlDevice struct { + Handle *_Ctype_struct_nvmlDevice_st +} + +type PciInfoExt_v1 struct { + Version uint32 + Domain uint32 + Bus uint32 + Device uint32 + PciDeviceId uint32 + PciSubSystemId uint32 + BaseClass uint32 + SubClass uint32 + BusId [32]int8 +} + +type PciInfoExt struct { + Version uint32 + Domain uint32 + Bus uint32 + Device uint32 + PciDeviceId uint32 + PciSubSystemId uint32 + BaseClass uint32 + SubClass uint32 + BusId [32]int8 +} + +type PciInfo struct { + BusIdLegacy [16]int8 + Domain uint32 + Bus uint32 + Device uint32 + PciDeviceId uint32 + PciSubSystemId uint32 + BusId [32]int8 +} + +type EccErrorCounts struct { + L1Cache uint64 + L2Cache uint64 + DeviceMemory uint64 + RegisterFile uint64 +} + +type Utilization struct { + Gpu uint32 + Memory uint32 +} + +type Memory struct { + Total uint64 + Free uint64 + Used uint64 +} + +type Memory_v2 struct { + Version uint32 + Total uint64 + Reserved uint64 + Free uint64 + Used uint64 +} + +type BAR1Memory struct { + Bar1Total uint64 + Bar1Free uint64 + Bar1Used uint64 +} + +type ProcessInfo_v1 struct { + Pid uint32 + UsedGpuMemory uint64 +} + +type ProcessInfo_v2 struct { + Pid uint32 + UsedGpuMemory uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type ProcessInfo struct { + Pid uint32 + UsedGpuMemory uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type ProcessDetail_v1 struct { + Pid uint32 + UsedGpuMemory uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 + UsedGpuCcProtectedMemory uint64 +} + +type ProcessDetailList_v1 struct { + Version uint32 + Mode uint32 + NumProcArrayEntries uint32 + ProcArray *ProcessDetail_v1 +} + +type ProcessDetailList struct { + Version uint32 + Mode uint32 + 
NumProcArrayEntries uint32 + ProcArray *ProcessDetail_v1 +} + +type DeviceAttributes struct { + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 + GpuInstanceSliceCount uint32 + ComputeInstanceSliceCount uint32 + MemorySizeMB uint64 +} + +type C2cModeInfo_v1 struct { + IsC2cEnabled uint32 +} + +type RowRemapperHistogramValues struct { + Max uint32 + High uint32 + Partial uint32 + Low uint32 + None uint32 +} + +type NvLinkUtilizationControl struct { + Units uint32 + Pktfilter uint32 +} + +type BridgeChipInfo struct { + Type uint32 + FwVersion uint32 +} + +type BridgeChipHierarchy struct { + BridgeCount uint8 + BridgeChipInfo [128]BridgeChipInfo +} + +const sizeofValue = unsafe.Sizeof([8]byte{}) + +type Value [sizeofValue]byte + +type Sample struct { + TimeStamp uint64 + SampleValue [8]byte +} + +type ViolationTime struct { + ReferenceTime uint64 + ViolationTime uint64 +} + +type GpuThermalSettingsSensor struct { + Controller int32 + DefaultMinTemp int32 + DefaultMaxTemp int32 + CurrentTemp int32 + Target int32 +} + +type GpuThermalSettings struct { + Count uint32 + Sensor [3]GpuThermalSettingsSensor +} + +type ClkMonFaultInfo struct { + ClkApiDomain uint32 + ClkDomainFaultMask uint32 +} + +type ClkMonStatus struct { + BGlobalStatus uint32 + ClkMonListSize uint32 + ClkMonList [32]ClkMonFaultInfo +} + +type nvmlVgpuTypeId uint32 + +type nvmlVgpuInstance uint32 + +type VgpuHeterogeneousMode_v1 struct { + Version uint32 + Mode uint32 +} + +type VgpuHeterogeneousMode struct { + Version uint32 + Mode uint32 +} + +type VgpuPlacementId_v1 struct { + Version uint32 + PlacementId uint32 +} + +type VgpuPlacementId struct { + Version uint32 + PlacementId uint32 +} + +type VgpuPlacementList_v1 struct { + Version uint32 + PlacementSize uint32 + Count uint32 + PlacementIds *uint32 +} + +type VgpuPlacementList struct { + Version uint32 + PlacementSize uint32 + Count uint32 + PlacementIds *uint32 +} + +type VgpuInstanceUtilizationSample struct { + VgpuInstance uint32 + TimeStamp uint64 + SmUtil [8]byte + MemUtil [8]byte + EncUtil [8]byte + DecUtil [8]byte +} + +type VgpuInstanceUtilizationInfo_v1 struct { + TimeStamp uint64 + VgpuInstance uint32 + Pad_cgo_0 [4]byte + SmUtil [8]byte + MemUtil [8]byte + EncUtil [8]byte + DecUtil [8]byte + JpgUtil [8]byte + OfaUtil [8]byte +} + +type VgpuInstancesUtilizationInfo_v1 struct { + Version uint32 + SampleValType uint32 + VgpuInstanceCount uint32 + LastSeenTimeStamp uint64 + VgpuUtilArray *VgpuInstanceUtilizationInfo_v1 +} + +type VgpuInstancesUtilizationInfo struct { + Version uint32 + SampleValType uint32 + VgpuInstanceCount uint32 + LastSeenTimeStamp uint64 + VgpuUtilArray *VgpuInstanceUtilizationInfo_v1 +} + +type VgpuProcessUtilizationSample struct { + VgpuInstance uint32 + Pid uint32 + ProcessName [64]int8 + TimeStamp uint64 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 +} + +type VgpuProcessUtilizationInfo_v1 struct { + ProcessName [64]int8 + TimeStamp uint64 + VgpuInstance uint32 + Pid uint32 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 + JpgUtil uint32 + OfaUtil uint32 +} + +type VgpuProcessesUtilizationInfo_v1 struct { + Version uint32 + VgpuProcessCount uint32 + LastSeenTimeStamp uint64 + VgpuProcUtilArray *VgpuProcessUtilizationInfo_v1 +} + +type VgpuProcessesUtilizationInfo struct { + Version uint32 + VgpuProcessCount uint32 + LastSeenTimeStamp uint64 + VgpuProcUtilArray 
*VgpuProcessUtilizationInfo_v1 +} + +type VgpuSchedulerParamsVgpuSchedDataWithARR struct { + AvgFactor uint32 + Timeslice uint32 +} + +type VgpuSchedulerParamsVgpuSchedData struct { + Timeslice uint32 +} + +const sizeofVgpuSchedulerParams = unsafe.Sizeof([8]byte{}) + +type VgpuSchedulerParams [sizeofVgpuSchedulerParams]byte + +type VgpuSchedulerLogEntry struct { + Timestamp uint64 + TimeRunTotal uint64 + TimeRun uint64 + SwRunlistId uint32 + TargetTimeSlice uint64 + CumulativePreemptionTime uint64 +} + +type VgpuSchedulerLog struct { + EngineId uint32 + SchedulerPolicy uint32 + ArrMode uint32 + SchedulerParams [8]byte + EntriesCount uint32 + LogEntries [200]VgpuSchedulerLogEntry +} + +type VgpuSchedulerGetState struct { + SchedulerPolicy uint32 + ArrMode uint32 + SchedulerParams [8]byte +} + +type VgpuSchedulerSetParamsVgpuSchedDataWithARR struct { + AvgFactor uint32 + Frequency uint32 +} + +type VgpuSchedulerSetParamsVgpuSchedData struct { + Timeslice uint32 +} + +const sizeofVgpuSchedulerSetParams = unsafe.Sizeof([8]byte{}) + +type VgpuSchedulerSetParams [sizeofVgpuSchedulerSetParams]byte + +type VgpuSchedulerSetState struct { + SchedulerPolicy uint32 + EnableARRMode uint32 + SchedulerParams [8]byte +} + +type VgpuSchedulerCapabilities struct { + SupportedSchedulers [3]uint32 + MaxTimeslice uint32 + MinTimeslice uint32 + IsArrModeSupported uint32 + MaxFrequencyForARR uint32 + MinFrequencyForARR uint32 + MaxAvgFactorForARR uint32 + MinAvgFactorForARR uint32 +} + +type VgpuLicenseExpiry struct { + Year uint32 + Month uint16 + Day uint16 + Hour uint16 + Min uint16 + Sec uint16 + Status uint8 + Pad_cgo_0 [1]byte +} + +type VgpuLicenseInfo struct { + IsLicensed uint8 + LicenseExpiry VgpuLicenseExpiry + CurrentState uint32 +} + +type ProcessUtilizationSample struct { + Pid uint32 + TimeStamp uint64 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 +} + +type ProcessUtilizationInfo_v1 struct { + TimeStamp uint64 + Pid uint32 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 + JpgUtil uint32 + OfaUtil uint32 + Pad_cgo_0 [4]byte +} + +type ProcessesUtilizationInfo_v1 struct { + Version uint32 + ProcessSamplesCount uint32 + LastSeenTimeStamp uint64 + ProcUtilArray *ProcessUtilizationInfo_v1 +} + +type ProcessesUtilizationInfo struct { + Version uint32 + ProcessSamplesCount uint32 + LastSeenTimeStamp uint64 + ProcUtilArray *ProcessUtilizationInfo_v1 +} + +type GridLicenseExpiry struct { + Year uint32 + Month uint16 + Day uint16 + Hour uint16 + Min uint16 + Sec uint16 + Status uint8 + Pad_cgo_0 [1]byte +} + +type GridLicensableFeature struct { + FeatureCode uint32 + FeatureState uint32 + LicenseInfo [128]int8 + ProductName [128]int8 + FeatureEnabled uint32 + LicenseExpiry GridLicenseExpiry +} + +type GridLicensableFeatures struct { + IsGridLicenseSupported int32 + LicensableFeaturesCount uint32 + GridLicensableFeatures [3]GridLicensableFeature +} + +type EccSramErrorStatus_v1 struct { + Version uint32 + AggregateUncParity uint64 + AggregateUncSecDed uint64 + AggregateCor uint64 + VolatileUncParity uint64 + VolatileUncSecDed uint64 + VolatileCor uint64 + AggregateUncBucketL2 uint64 + AggregateUncBucketSm uint64 + AggregateUncBucketPcie uint64 + AggregateUncBucketMcu uint64 + AggregateUncBucketOther uint64 + BThresholdExceeded uint32 + Pad_cgo_0 [4]byte +} + +type EccSramErrorStatus struct { + Version uint32 + AggregateUncParity uint64 + AggregateUncSecDed uint64 + AggregateCor uint64 + VolatileUncParity uint64 + VolatileUncSecDed uint64 + VolatileCor uint64 + 
AggregateUncBucketL2 uint64 + AggregateUncBucketSm uint64 + AggregateUncBucketPcie uint64 + AggregateUncBucketMcu uint64 + AggregateUncBucketOther uint64 + BThresholdExceeded uint32 + Pad_cgo_0 [4]byte +} + +type DeviceArchitecture uint32 + +type BusType uint32 + +type FanControlPolicy uint32 + +type PowerSource uint32 + +type GpuDynamicPstatesInfoUtilization struct { + BIsPresent uint32 + Percentage uint32 + IncThreshold uint32 + DecThreshold uint32 +} + +type GpuDynamicPstatesInfo struct { + Flags uint32 + Utilization [8]GpuDynamicPstatesInfoUtilization +} + +type FieldValue struct { + FieldId uint32 + ScopeId uint32 + Timestamp int64 + LatencyUsec int64 + ValueType uint32 + NvmlReturn uint32 + Value [8]byte +} + +type nvmlUnit struct { + Handle *_Ctype_struct_nvmlUnit_st +} + +type HwbcEntry struct { + HwbcId uint32 + FirmwareVersion [32]int8 +} + +type LedState struct { + Cause [256]int8 + Color uint32 +} + +type UnitInfo struct { + Name [96]int8 + Id [96]int8 + Serial [96]int8 + FirmwareVersion [96]int8 +} + +type PSUInfo struct { + State [256]int8 + Current uint32 + Voltage uint32 + Power uint32 +} + +type UnitFanInfo struct { + Speed uint32 + State uint32 +} + +type UnitFanSpeeds struct { + Fans [24]UnitFanInfo + Count uint32 +} + +type nvmlEventSet struct { + Handle *_Ctype_struct_nvmlEventSet_st +} + +type nvmlEventData struct { + Device nvmlDevice + EventType uint64 + EventData uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type AccountingStats struct { + GpuUtilization uint32 + MemoryUtilization uint32 + MaxMemoryUsage uint64 + Time uint64 + StartTime uint64 + IsRunning uint32 + Reserved [5]uint32 +} + +type EncoderSessionInfo struct { + SessionId uint32 + Pid uint32 + VgpuInstance uint32 + CodecType uint32 + HResolution uint32 + VResolution uint32 + AverageFps uint32 + AverageLatency uint32 +} + +type FBCStats struct { + SessionsCount uint32 + AverageFPS uint32 + AverageLatency uint32 +} + +type FBCSessionInfo struct { + SessionId uint32 + Pid uint32 + VgpuInstance uint32 + DisplayOrdinal uint32 + SessionType uint32 + SessionFlags uint32 + HMaxResolution uint32 + VMaxResolution uint32 + HResolution uint32 + VResolution uint32 + AverageFPS uint32 + AverageLatency uint32 +} + +type ConfComputeSystemCaps struct { + CpuCaps uint32 + GpusCaps uint32 +} + +type ConfComputeSystemState struct { + Environment uint32 + CcFeature uint32 + DevToolsMode uint32 +} + +type SystemConfComputeSettings_v1 struct { + Version uint32 + Environment uint32 + CcFeature uint32 + DevToolsMode uint32 + MultiGpuMode uint32 +} + +type SystemConfComputeSettings struct { + Version uint32 + Environment uint32 + CcFeature uint32 + DevToolsMode uint32 + MultiGpuMode uint32 +} + +type ConfComputeMemSizeInfo struct { + ProtectedMemSizeKib uint64 + UnprotectedMemSizeKib uint64 +} + +type ConfComputeGpuCertificate struct { + CertChainSize uint32 + AttestationCertChainSize uint32 + CertChain [4096]uint8 + AttestationCertChain [5120]uint8 +} + +type ConfComputeGpuAttestationReport struct { + IsCecAttestationReportPresent uint32 + AttestationReportSize uint32 + CecAttestationReportSize uint32 + Nonce [32]uint8 + AttestationReport [8192]uint8 + CecAttestationReport [4096]uint8 +} + +type ConfComputeSetKeyRotationThresholdInfo_v1 struct { + Version uint32 + MaxAttackerAdvantage uint64 +} + +type ConfComputeSetKeyRotationThresholdInfo struct { + Version uint32 + MaxAttackerAdvantage uint64 +} + +type ConfComputeGetKeyRotationThresholdInfo_v1 struct { + Version uint32 + AttackerAdvantage uint64 +} + +type 
ConfComputeGetKeyRotationThresholdInfo struct { + Version uint32 + AttackerAdvantage uint64 +} + +type GpuFabricState byte + +type GpuFabricInfo struct { + ClusterUuid [16]uint8 + Status uint32 + CliqueId uint32 + State uint8 + Pad_cgo_0 [3]byte +} + +type GpuFabricInfo_v2 struct { + Version uint32 + ClusterUuid [16]uint8 + Status uint32 + CliqueId uint32 + State uint8 + HealthMask uint32 +} + +type GpuFabricInfoV struct { + Version uint32 + ClusterUuid [16]uint8 + Status uint32 + CliqueId uint32 + State uint8 + HealthMask uint32 +} + +type PowerScopeType byte + +type PowerValue_v2 struct { + Version uint32 + PowerScope uint8 + PowerValueMw uint32 +} + +type AffinityScope uint32 + +type VgpuVersion struct { + MinVersion uint32 + MaxVersion uint32 +} + +type nvmlVgpuMetadata struct { + Version uint32 + Revision uint32 + GuestInfoState uint32 + GuestDriverVersion [80]int8 + HostDriverVersion [80]int8 + Reserved [6]uint32 + VgpuVirtualizationCaps uint32 + GuestVgpuVersion uint32 + OpaqueDataSize uint32 + OpaqueData [4]int8 +} + +type nvmlVgpuPgpuMetadata struct { + Version uint32 + Revision uint32 + HostDriverVersion [80]int8 + PgpuVirtualizationCaps uint32 + Reserved [5]uint32 + HostSupportedVgpuRange VgpuVersion + OpaqueDataSize uint32 + OpaqueData [4]int8 +} + +type VgpuPgpuCompatibility struct { + VgpuVmCompatibility uint32 + CompatibilityLimitCode uint32 +} + +type ExcludedDeviceInfo struct { + PciInfo PciInfo + Uuid [80]int8 +} + +type GpuInstancePlacement struct { + Start uint32 + Size uint32 +} + +type GpuInstanceProfileInfo struct { + Id uint32 + IsP2pSupported uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + CopyEngineCount uint32 + DecoderCount uint32 + EncoderCount uint32 + JpegCount uint32 + OfaCount uint32 + MemorySizeMB uint64 +} + +type GpuInstanceProfileInfo_v2 struct { + Version uint32 + Id uint32 + IsP2pSupported uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + CopyEngineCount uint32 + DecoderCount uint32 + EncoderCount uint32 + JpegCount uint32 + OfaCount uint32 + MemorySizeMB uint64 + Name [96]int8 +} + +type GpuInstanceProfileInfo_v3 struct { + Version uint32 + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + CopyEngineCount uint32 + DecoderCount uint32 + EncoderCount uint32 + JpegCount uint32 + OfaCount uint32 + MemorySizeMB uint64 + Name [96]int8 + Capabilities uint32 + Pad_cgo_0 [4]byte +} + +type nvmlGpuInstanceInfo struct { + Device nvmlDevice + Id uint32 + ProfileId uint32 + Placement GpuInstancePlacement +} + +type nvmlGpuInstance struct { + Handle *_Ctype_struct_nvmlGpuInstance_st +} + +type ComputeInstancePlacement struct { + Start uint32 + Size uint32 +} + +type ComputeInstanceProfileInfo struct { + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 +} + +type ComputeInstanceProfileInfo_v2 struct { + Version uint32 + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 + Name [96]int8 +} + +type ComputeInstanceProfileInfo_v3 struct { + Version uint32 + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount 
uint32 + SharedOfaCount uint32 + Name [96]int8 + Capabilities uint32 +} + +type nvmlComputeInstanceInfo struct { + Device nvmlDevice + GpuInstance nvmlGpuInstance + Id uint32 + ProfileId uint32 + Placement ComputeInstancePlacement +} + +type nvmlComputeInstance struct { + Handle *_Ctype_struct_nvmlComputeInstance_st +} + +type nvmlGpmSample struct { + Handle *_Ctype_struct_nvmlGpmSample_st +} + +type GpmMetricMetricInfo struct { + ShortName *int8 + LongName *int8 + Unit *int8 +} + +type GpmMetric struct { + MetricId uint32 + NvmlReturn uint32 + Value float64 + MetricInfo GpmMetricMetricInfo +} + +type nvmlGpmMetricsGetType struct { + Version uint32 + NumMetrics uint32 + Sample1 nvmlGpmSample + Sample2 nvmlGpmSample + Metrics [98]GpmMetric +} + +type GpmSupport struct { + Version uint32 + IsSupportedDevice uint32 +} + +type NvLinkPowerThres struct { + LowPwrThreshold uint32 +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go new file mode 100644 index 00000000000..617ad546265 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go @@ -0,0 +1,113 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
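The GpuInstance*/ComputeInstance* structs above are the Go counterparts of the MIG profile and placement types, and they pair with the MIG queries documented earlier in nvml.h (nvmlDeviceGetMaxMigDeviceCount, nvmlDeviceGetMigDeviceHandleByIndex). A sketch of enumerating MIG children from Go, assuming the usual package-level wrappers and Device methods exposed by this module:

package main

import (
	"fmt"
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("init: %v", ret)
	}
	defer nvml.Shutdown()

	parent, ret := nvml.DeviceGetHandleByIndex(0)
	if ret != nvml.SUCCESS {
		log.Fatalf("device 0: %v", ret)
	}

	// Zero means MIG is unsupported or disabled on this GPU.
	max, ret := nvml.DeviceGetMaxMigDeviceCount(parent)
	if ret != nvml.SUCCESS {
		log.Fatalf("max MIG count: %v", ret)
	}

	for i := 0; i < max; i++ {
		mig, ret := nvml.DeviceGetMigDeviceHandleByIndex(parent, i)
		if ret == nvml.ERROR_NOT_FOUND {
			continue // no MIG device at this index
		}
		if ret != nvml.SUCCESS {
			log.Fatalf("MIG handle %d: %v", i, ret)
		}
		uuid, _ := mig.GetUUID()
		giID, _ := mig.GetGpuInstanceId()
		ciID, _ := mig.GetComputeInstanceId()
		fmt.Printf("MIG %s: GPU instance %d, compute instance %d\n", uuid, giID, ciID)
	}
}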
+ +package nvml + +// nvml.UnitGetCount() +func (l *library) UnitGetCount() (int, Return) { + var UnitCount uint32 + ret := nvmlUnitGetCount(&UnitCount) + return int(UnitCount), ret +} + +// nvml.UnitGetHandleByIndex() +func (l *library) UnitGetHandleByIndex(index int) (Unit, Return) { + var unit nvmlUnit + ret := nvmlUnitGetHandleByIndex(uint32(index), &unit) + return unit, ret +} + +// nvml.UnitGetUnitInfo() +func (l *library) UnitGetUnitInfo(unit Unit) (UnitInfo, Return) { + return unit.GetUnitInfo() +} + +func (unit nvmlUnit) GetUnitInfo() (UnitInfo, Return) { + var info UnitInfo + ret := nvmlUnitGetUnitInfo(unit, &info) + return info, ret +} + +// nvml.UnitGetLedState() +func (l *library) UnitGetLedState(unit Unit) (LedState, Return) { + return unit.GetLedState() +} + +func (unit nvmlUnit) GetLedState() (LedState, Return) { + var state LedState + ret := nvmlUnitGetLedState(unit, &state) + return state, ret +} + +// nvml.UnitGetPsuInfo() +func (l *library) UnitGetPsuInfo(unit Unit) (PSUInfo, Return) { + return unit.GetPsuInfo() +} + +func (unit nvmlUnit) GetPsuInfo() (PSUInfo, Return) { + var psu PSUInfo + ret := nvmlUnitGetPsuInfo(unit, &psu) + return psu, ret +} + +// nvml.UnitGetTemperature() +func (l *library) UnitGetTemperature(unit Unit, ttype int) (uint32, Return) { + return unit.GetTemperature(ttype) +} + +func (unit nvmlUnit) GetTemperature(ttype int) (uint32, Return) { + var temp uint32 + ret := nvmlUnitGetTemperature(unit, uint32(ttype), &temp) + return temp, ret +} + +// nvml.UnitGetFanSpeedInfo() +func (l *library) UnitGetFanSpeedInfo(unit Unit) (UnitFanSpeeds, Return) { + return unit.GetFanSpeedInfo() +} + +func (unit nvmlUnit) GetFanSpeedInfo() (UnitFanSpeeds, Return) { + var fanSpeeds UnitFanSpeeds + ret := nvmlUnitGetFanSpeedInfo(unit, &fanSpeeds) + return fanSpeeds, ret +} + +// nvml.UnitGetDevices() +func (l *library) UnitGetDevices(unit Unit) ([]Device, Return) { + return unit.GetDevices() +} + +func (unit nvmlUnit) GetDevices() ([]Device, Return) { + var deviceCount uint32 = 1 // Will be reduced upon returning + for { + devices := make([]nvmlDevice, deviceCount) + ret := nvmlUnitGetDevices(unit, &deviceCount, &devices[0]) + if ret == SUCCESS { + return convertSlice[nvmlDevice, Device](devices[:deviceCount]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + deviceCount *= 2 + } +} + +// nvml.UnitSetLedState() +func (l *library) UnitSetLedState(unit Unit, color LedColor) Return { + return unit.SetLedState(color) +} + +func (unit nvmlUnit) SetLedState(color LedColor) Return { + return nvmlUnitSetLedState(unit, color) +} diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go new file mode 100644 index 00000000000..da4952422c1 --- /dev/null +++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go @@ -0,0 +1,480 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
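The unit bindings above cover NVML's S-class "unit" queries; note that GetDevices uses the same doubling retry on ERROR_INSUFFICIENT_SIZE as SystemGetHicVersion earlier. A short sketch of walking units and their PSUs through the package-level wrappers; this is only meaningful on hardware that actually exposes NVML units, and the wrapper names are assumed rather than verified:

package main

import (
	"fmt"
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("init: %v", ret)
	}
	defer nvml.Shutdown()

	count, ret := nvml.UnitGetCount()
	if ret != nvml.SUCCESS {
		log.Fatalf("unit count: %v", ret)
	}

	for i := 0; i < count; i++ {
		unit, ret := nvml.UnitGetHandleByIndex(i)
		if ret != nvml.SUCCESS {
			continue
		}
		if psu, ret := nvml.UnitGetPsuInfo(unit); ret == nvml.SUCCESS {
			fmt.Printf("unit %d PSU power reading: %d\n", i, psu.Power)
		}
		// UnitGetDevices grows its buffer internally until NVML stops
		// returning ERROR_INSUFFICIENT_SIZE.
		if devices, ret := nvml.UnitGetDevices(unit); ret == nvml.SUCCESS {
			fmt.Printf("unit %d exposes %d GPUs\n", i, len(devices))
		}
	}
}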
+ +package nvml + +import ( + "unsafe" +) + +// nvml.VgpuMetadata +type VgpuMetadata struct { + nvmlVgpuMetadata + OpaqueData []byte +} + +// nvml.VgpuPgpuMetadata +type VgpuPgpuMetadata struct { + nvmlVgpuPgpuMetadata + OpaqueData []byte +} + +// nvml.VgpuTypeGetClass() +func (l *library) VgpuTypeGetClass(vgpuTypeId VgpuTypeId) (string, Return) { + return vgpuTypeId.GetClass() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetClass() (string, Return) { + var size uint32 = DEVICE_NAME_BUFFER_SIZE + vgpuTypeClass := make([]byte, DEVICE_NAME_BUFFER_SIZE) + ret := nvmlVgpuTypeGetClass(vgpuTypeId, &vgpuTypeClass[0], &size) + return string(vgpuTypeClass[:clen(vgpuTypeClass)]), ret +} + +// nvml.VgpuTypeGetName() +func (l *library) VgpuTypeGetName(vgpuTypeId VgpuTypeId) (string, Return) { + return vgpuTypeId.GetName() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetName() (string, Return) { + var size uint32 = DEVICE_NAME_BUFFER_SIZE + vgpuTypeName := make([]byte, DEVICE_NAME_BUFFER_SIZE) + ret := nvmlVgpuTypeGetName(vgpuTypeId, &vgpuTypeName[0], &size) + return string(vgpuTypeName[:clen(vgpuTypeName)]), ret +} + +// nvml.VgpuTypeGetGpuInstanceProfileId() +func (l *library) VgpuTypeGetGpuInstanceProfileId(vgpuTypeId VgpuTypeId) (uint32, Return) { + return vgpuTypeId.GetGpuInstanceProfileId() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetGpuInstanceProfileId() (uint32, Return) { + var size uint32 + ret := nvmlVgpuTypeGetGpuInstanceProfileId(vgpuTypeId, &size) + return size, ret +} + +// nvml.VgpuTypeGetDeviceID() +func (l *library) VgpuTypeGetDeviceID(vgpuTypeId VgpuTypeId) (uint64, uint64, Return) { + return vgpuTypeId.GetDeviceID() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetDeviceID() (uint64, uint64, Return) { + var deviceID, subsystemID uint64 + ret := nvmlVgpuTypeGetDeviceID(vgpuTypeId, &deviceID, &subsystemID) + return deviceID, subsystemID, ret +} + +// nvml.VgpuTypeGetFramebufferSize() +func (l *library) VgpuTypeGetFramebufferSize(vgpuTypeId VgpuTypeId) (uint64, Return) { + return vgpuTypeId.GetFramebufferSize() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetFramebufferSize() (uint64, Return) { + var fbSize uint64 + ret := nvmlVgpuTypeGetFramebufferSize(vgpuTypeId, &fbSize) + return fbSize, ret +} + +// nvml.VgpuTypeGetNumDisplayHeads() +func (l *library) VgpuTypeGetNumDisplayHeads(vgpuTypeId VgpuTypeId) (int, Return) { + return vgpuTypeId.GetNumDisplayHeads() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetNumDisplayHeads() (int, Return) { + var numDisplayHeads uint32 + ret := nvmlVgpuTypeGetNumDisplayHeads(vgpuTypeId, &numDisplayHeads) + return int(numDisplayHeads), ret +} + +// nvml.VgpuTypeGetResolution() +func (l *library) VgpuTypeGetResolution(vgpuTypeId VgpuTypeId, displayIndex int) (uint32, uint32, Return) { + return vgpuTypeId.GetResolution(displayIndex) +} + +func (vgpuTypeId nvmlVgpuTypeId) GetResolution(displayIndex int) (uint32, uint32, Return) { + var xdim, ydim uint32 + ret := nvmlVgpuTypeGetResolution(vgpuTypeId, uint32(displayIndex), &xdim, &ydim) + return xdim, ydim, ret +} + +// nvml.VgpuTypeGetLicense() +func (l *library) VgpuTypeGetLicense(vgpuTypeId VgpuTypeId) (string, Return) { + return vgpuTypeId.GetLicense() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetLicense() (string, Return) { + vgpuTypeLicenseString := make([]byte, GRID_LICENSE_BUFFER_SIZE) + ret := nvmlVgpuTypeGetLicense(vgpuTypeId, &vgpuTypeLicenseString[0], GRID_LICENSE_BUFFER_SIZE) + return string(vgpuTypeLicenseString[:clen(vgpuTypeLicenseString)]), ret +} + +// nvml.VgpuTypeGetFrameRateLimit() +func (l *library) 
VgpuTypeGetFrameRateLimit(vgpuTypeId VgpuTypeId) (uint32, Return) { + return vgpuTypeId.GetFrameRateLimit() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetFrameRateLimit() (uint32, Return) { + var frameRateLimit uint32 + ret := nvmlVgpuTypeGetFrameRateLimit(vgpuTypeId, &frameRateLimit) + return frameRateLimit, ret +} + +// nvml.VgpuTypeGetMaxInstances() +func (l *library) VgpuTypeGetMaxInstances(device Device, vgpuTypeId VgpuTypeId) (int, Return) { + return vgpuTypeId.GetMaxInstances(device) +} + +func (device nvmlDevice) VgpuTypeGetMaxInstances(vgpuTypeId VgpuTypeId) (int, Return) { + return vgpuTypeId.GetMaxInstances(device) +} + +func (vgpuTypeId nvmlVgpuTypeId) GetMaxInstances(device Device) (int, Return) { + var vgpuInstanceCount uint32 + ret := nvmlVgpuTypeGetMaxInstances(nvmlDeviceHandle(device), vgpuTypeId, &vgpuInstanceCount) + return int(vgpuInstanceCount), ret +} + +// nvml.VgpuTypeGetMaxInstancesPerVm() +func (l *library) VgpuTypeGetMaxInstancesPerVm(vgpuTypeId VgpuTypeId) (int, Return) { + return vgpuTypeId.GetMaxInstancesPerVm() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetMaxInstancesPerVm() (int, Return) { + var vgpuInstanceCountPerVm uint32 + ret := nvmlVgpuTypeGetMaxInstancesPerVm(vgpuTypeId, &vgpuInstanceCountPerVm) + return int(vgpuInstanceCountPerVm), ret +} + +// nvml.VgpuInstanceGetVmID() +func (l *library) VgpuInstanceGetVmID(vgpuInstance VgpuInstance) (string, VgpuVmIdType, Return) { + return vgpuInstance.GetVmID() +} + +func (vgpuInstance nvmlVgpuInstance) GetVmID() (string, VgpuVmIdType, Return) { + var vmIdType VgpuVmIdType + vmId := make([]byte, DEVICE_UUID_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetVmID(vgpuInstance, &vmId[0], DEVICE_UUID_BUFFER_SIZE, &vmIdType) + return string(vmId[:clen(vmId)]), vmIdType, ret +} + +// nvml.VgpuInstanceGetUUID() +func (l *library) VgpuInstanceGetUUID(vgpuInstance VgpuInstance) (string, Return) { + return vgpuInstance.GetUUID() +} + +func (vgpuInstance nvmlVgpuInstance) GetUUID() (string, Return) { + uuid := make([]byte, DEVICE_UUID_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetUUID(vgpuInstance, &uuid[0], DEVICE_UUID_BUFFER_SIZE) + return string(uuid[:clen(uuid)]), ret +} + +// nvml.VgpuInstanceGetVmDriverVersion() +func (l *library) VgpuInstanceGetVmDriverVersion(vgpuInstance VgpuInstance) (string, Return) { + return vgpuInstance.GetVmDriverVersion() +} + +func (vgpuInstance nvmlVgpuInstance) GetVmDriverVersion() (string, Return) { + version := make([]byte, SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetVmDriverVersion(vgpuInstance, &version[0], SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + return string(version[:clen(version)]), ret +} + +// nvml.VgpuInstanceGetFbUsage() +func (l *library) VgpuInstanceGetFbUsage(vgpuInstance VgpuInstance) (uint64, Return) { + return vgpuInstance.GetFbUsage() +} + +func (vgpuInstance nvmlVgpuInstance) GetFbUsage() (uint64, Return) { + var fbUsage uint64 + ret := nvmlVgpuInstanceGetFbUsage(vgpuInstance, &fbUsage) + return fbUsage, ret +} + +// nvml.VgpuInstanceGetLicenseInfo() +func (l *library) VgpuInstanceGetLicenseInfo(vgpuInstance VgpuInstance) (VgpuLicenseInfo, Return) { + return vgpuInstance.GetLicenseInfo() +} + +func (vgpuInstance nvmlVgpuInstance) GetLicenseInfo() (VgpuLicenseInfo, Return) { + var licenseInfo VgpuLicenseInfo + ret := nvmlVgpuInstanceGetLicenseInfo(vgpuInstance, &licenseInfo) + return licenseInfo, ret +} + +// nvml.VgpuInstanceGetLicenseStatus() +func (l *library) VgpuInstanceGetLicenseStatus(vgpuInstance VgpuInstance) (int, Return) { + return vgpuInstance.GetLicenseStatus() 
+} + +func (vgpuInstance nvmlVgpuInstance) GetLicenseStatus() (int, Return) { + var licensed uint32 + ret := nvmlVgpuInstanceGetLicenseStatus(vgpuInstance, &licensed) + return int(licensed), ret +} + +// nvml.VgpuInstanceGetType() +func (l *library) VgpuInstanceGetType(vgpuInstance VgpuInstance) (VgpuTypeId, Return) { + return vgpuInstance.GetType() +} + +func (vgpuInstance nvmlVgpuInstance) GetType() (VgpuTypeId, Return) { + var vgpuTypeId nvmlVgpuTypeId + ret := nvmlVgpuInstanceGetType(vgpuInstance, &vgpuTypeId) + return vgpuTypeId, ret +} + +// nvml.VgpuInstanceGetFrameRateLimit() +func (l *library) VgpuInstanceGetFrameRateLimit(vgpuInstance VgpuInstance) (uint32, Return) { + return vgpuInstance.GetFrameRateLimit() +} + +func (vgpuInstance nvmlVgpuInstance) GetFrameRateLimit() (uint32, Return) { + var frameRateLimit uint32 + ret := nvmlVgpuInstanceGetFrameRateLimit(vgpuInstance, &frameRateLimit) + return frameRateLimit, ret +} + +// nvml.VgpuInstanceGetEccMode() +func (l *library) VgpuInstanceGetEccMode(vgpuInstance VgpuInstance) (EnableState, Return) { + return vgpuInstance.GetEccMode() +} + +func (vgpuInstance nvmlVgpuInstance) GetEccMode() (EnableState, Return) { + var eccMode EnableState + ret := nvmlVgpuInstanceGetEccMode(vgpuInstance, &eccMode) + return eccMode, ret +} + +// nvml.VgpuInstanceGetEncoderCapacity() +func (l *library) VgpuInstanceGetEncoderCapacity(vgpuInstance VgpuInstance) (int, Return) { + return vgpuInstance.GetEncoderCapacity() +} + +func (vgpuInstance nvmlVgpuInstance) GetEncoderCapacity() (int, Return) { + var encoderCapacity uint32 + ret := nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance, &encoderCapacity) + return int(encoderCapacity), ret +} + +// nvml.VgpuInstanceSetEncoderCapacity() +func (l *library) VgpuInstanceSetEncoderCapacity(vgpuInstance VgpuInstance, encoderCapacity int) Return { + return vgpuInstance.SetEncoderCapacity(encoderCapacity) +} + +func (vgpuInstance nvmlVgpuInstance) SetEncoderCapacity(encoderCapacity int) Return { + return nvmlVgpuInstanceSetEncoderCapacity(vgpuInstance, uint32(encoderCapacity)) +} + +// nvml.VgpuInstanceGetEncoderStats() +func (l *library) VgpuInstanceGetEncoderStats(vgpuInstance VgpuInstance) (int, uint32, uint32, Return) { + return vgpuInstance.GetEncoderStats() +} + +func (vgpuInstance nvmlVgpuInstance) GetEncoderStats() (int, uint32, uint32, Return) { + var sessionCount, averageFps, averageLatency uint32 + ret := nvmlVgpuInstanceGetEncoderStats(vgpuInstance, &sessionCount, &averageFps, &averageLatency) + return int(sessionCount), averageFps, averageLatency, ret +} + +// nvml.VgpuInstanceGetEncoderSessions() +func (l *library) VgpuInstanceGetEncoderSessions(vgpuInstance VgpuInstance) (int, EncoderSessionInfo, Return) { + return vgpuInstance.GetEncoderSessions() +} + +func (vgpuInstance nvmlVgpuInstance) GetEncoderSessions() (int, EncoderSessionInfo, Return) { + var sessionCount uint32 + var sessionInfo EncoderSessionInfo + ret := nvmlVgpuInstanceGetEncoderSessions(vgpuInstance, &sessionCount, &sessionInfo) + return int(sessionCount), sessionInfo, ret +} + +// nvml.VgpuInstanceGetFBCStats() +func (l *library) VgpuInstanceGetFBCStats(vgpuInstance VgpuInstance) (FBCStats, Return) { + return vgpuInstance.GetFBCStats() +} + +func (vgpuInstance nvmlVgpuInstance) GetFBCStats() (FBCStats, Return) { + var fbcStats FBCStats + ret := nvmlVgpuInstanceGetFBCStats(vgpuInstance, &fbcStats) + return fbcStats, ret +} + +// nvml.VgpuInstanceGetFBCSessions() +func (l *library) VgpuInstanceGetFBCSessions(vgpuInstance VgpuInstance) 
(int, FBCSessionInfo, Return) { + return vgpuInstance.GetFBCSessions() +} + +func (vgpuInstance nvmlVgpuInstance) GetFBCSessions() (int, FBCSessionInfo, Return) { + var sessionCount uint32 + var sessionInfo FBCSessionInfo + ret := nvmlVgpuInstanceGetFBCSessions(vgpuInstance, &sessionCount, &sessionInfo) + return int(sessionCount), sessionInfo, ret +} + +// nvml.VgpuInstanceGetGpuInstanceId() +func (l *library) VgpuInstanceGetGpuInstanceId(vgpuInstance VgpuInstance) (int, Return) { + return vgpuInstance.GetGpuInstanceId() +} + +func (vgpuInstance nvmlVgpuInstance) GetGpuInstanceId() (int, Return) { + var gpuInstanceId uint32 + ret := nvmlVgpuInstanceGetGpuInstanceId(vgpuInstance, &gpuInstanceId) + return int(gpuInstanceId), ret +} + +// nvml.VgpuInstanceGetGpuPciId() +func (l *library) VgpuInstanceGetGpuPciId(vgpuInstance VgpuInstance) (string, Return) { + return vgpuInstance.GetGpuPciId() +} + +func (vgpuInstance nvmlVgpuInstance) GetGpuPciId() (string, Return) { + var length uint32 = 1 // Will be reduced upon returning + for { + vgpuPciId := make([]byte, length) + ret := nvmlVgpuInstanceGetGpuPciId(vgpuInstance, &vgpuPciId[0], &length) + if ret == SUCCESS { + return string(vgpuPciId[:clen(vgpuPciId)]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return "", ret + } + length *= 2 + } +} + +// nvml.VgpuInstanceGetMetadata() +func (l *library) VgpuInstanceGetMetadata(vgpuInstance VgpuInstance) (VgpuMetadata, Return) { + return vgpuInstance.GetMetadata() +} + +func (vgpuInstance nvmlVgpuInstance) GetMetadata() (VgpuMetadata, Return) { + var vgpuMetadata VgpuMetadata + opaqueDataSize := unsafe.Sizeof(vgpuMetadata.nvmlVgpuMetadata.OpaqueData) + vgpuMetadataSize := unsafe.Sizeof(vgpuMetadata.nvmlVgpuMetadata) - opaqueDataSize + for { + bufferSize := uint32(vgpuMetadataSize + opaqueDataSize) + buffer := make([]byte, bufferSize) + nvmlVgpuMetadataPtr := (*nvmlVgpuMetadata)(unsafe.Pointer(&buffer[0])) + ret := nvmlVgpuInstanceGetMetadata(vgpuInstance, nvmlVgpuMetadataPtr, &bufferSize) + if ret == SUCCESS { + vgpuMetadata.nvmlVgpuMetadata = *nvmlVgpuMetadataPtr + vgpuMetadata.OpaqueData = buffer[vgpuMetadataSize:bufferSize] + return vgpuMetadata, ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return vgpuMetadata, ret + } + opaqueDataSize = 2 * opaqueDataSize + } +} + +// nvml.VgpuInstanceGetAccountingMode() +func (l *library) VgpuInstanceGetAccountingMode(vgpuInstance VgpuInstance) (EnableState, Return) { + return vgpuInstance.GetAccountingMode() +} + +func (vgpuInstance nvmlVgpuInstance) GetAccountingMode() (EnableState, Return) { + var mode EnableState + ret := nvmlVgpuInstanceGetAccountingMode(vgpuInstance, &mode) + return mode, ret +} + +// nvml.VgpuInstanceGetAccountingPids() +func (l *library) VgpuInstanceGetAccountingPids(vgpuInstance VgpuInstance) ([]int, Return) { + return vgpuInstance.GetAccountingPids() +} + +func (vgpuInstance nvmlVgpuInstance) GetAccountingPids() ([]int, Return) { + var count uint32 = 1 // Will be reduced upon returning + for { + pids := make([]uint32, count) + ret := nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, &pids[0]) + if ret == SUCCESS { + return uint32SliceToIntSlice(pids[:count]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + count *= 2 + } +} + +// nvml.VgpuInstanceGetAccountingStats() +func (l *library) VgpuInstanceGetAccountingStats(vgpuInstance VgpuInstance, pid int) (AccountingStats, Return) { + return vgpuInstance.GetAccountingStats(pid) +} + +func (vgpuInstance nvmlVgpuInstance) GetAccountingStats(pid int) 
(AccountingStats, Return) {
+	var stats AccountingStats
+	ret := nvmlVgpuInstanceGetAccountingStats(vgpuInstance, uint32(pid), &stats)
+	return stats, ret
+}
+
+// nvml.GetVgpuCompatibility()
+func (l *library) GetVgpuCompatibility(vgpuMetadata *VgpuMetadata, pgpuMetadata *VgpuPgpuMetadata) (VgpuPgpuCompatibility, Return) {
+	var compatibilityInfo VgpuPgpuCompatibility
+	ret := nvmlGetVgpuCompatibility(&vgpuMetadata.nvmlVgpuMetadata, &pgpuMetadata.nvmlVgpuPgpuMetadata, &compatibilityInfo)
+	return compatibilityInfo, ret
+}
+
+// nvml.GetVgpuVersion()
+func (l *library) GetVgpuVersion() (VgpuVersion, VgpuVersion, Return) {
+	var supported, current VgpuVersion
+	ret := nvmlGetVgpuVersion(&supported, &current)
+	return supported, current, ret
+}
+
+// nvml.SetVgpuVersion()
+func (l *library) SetVgpuVersion(vgpuVersion *VgpuVersion) Return {
+	return nvmlSetVgpuVersion(vgpuVersion)
+}
+
+// nvml.VgpuInstanceClearAccountingPids()
+func (l *library) VgpuInstanceClearAccountingPids(vgpuInstance VgpuInstance) Return {
+	return vgpuInstance.ClearAccountingPids()
+}
+
+func (vgpuInstance nvmlVgpuInstance) ClearAccountingPids() Return {
+	return nvmlVgpuInstanceClearAccountingPids(vgpuInstance)
+}
+
+// nvml.VgpuInstanceGetMdevUUID()
+func (l *library) VgpuInstanceGetMdevUUID(vgpuInstance VgpuInstance) (string, Return) {
+	return vgpuInstance.GetMdevUUID()
+}
+
+func (vgpuInstance nvmlVgpuInstance) GetMdevUUID() (string, Return) {
+	mdevUUID := make([]byte, DEVICE_UUID_BUFFER_SIZE)
+	ret := nvmlVgpuInstanceGetMdevUUID(vgpuInstance, &mdevUUID[0], DEVICE_UUID_BUFFER_SIZE)
+	return string(mdevUUID[:clen(mdevUUID)]), ret
+}
+
+// nvml.VgpuTypeGetCapabilities()
+func (l *library) VgpuTypeGetCapabilities(vgpuTypeId VgpuTypeId, capability VgpuCapability) (bool, Return) {
+	return vgpuTypeId.GetCapabilities(capability)
+}
+
+func (vgpuTypeId nvmlVgpuTypeId) GetCapabilities(capability VgpuCapability) (bool, Return) {
+	var capResult uint32
+	ret := nvmlVgpuTypeGetCapabilities(vgpuTypeId, capability, &capResult)
+	return (capResult != 0), ret
+}
+
+// nvml.GetVgpuDriverCapabilities()
+func (l *library) GetVgpuDriverCapabilities(capability VgpuDriverCapability) (bool, Return) {
+	var capResult uint32
+	ret := nvmlGetVgpuDriverCapabilities(capability, &capResult)
+	return (capResult != 0), ret
+}
diff --git a/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/zz_generated.api.go b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/zz_generated.api.go
new file mode 100644
index 00000000000..c1ecb2d0ebf
--- /dev/null
+++ b/ecs-init/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/zz_generated.api.go
@@ -0,0 +1,1007 @@
+/**
+# Copyright 2024 NVIDIA CORPORATION
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+**/
+
+// Generated Code; DO NOT EDIT.
+
+package nvml
+
+// The variables below represent package level methods from the library type.
+var ( + ComputeInstanceDestroy = libnvml.ComputeInstanceDestroy + ComputeInstanceGetInfo = libnvml.ComputeInstanceGetInfo + DeviceClearAccountingPids = libnvml.DeviceClearAccountingPids + DeviceClearCpuAffinity = libnvml.DeviceClearCpuAffinity + DeviceClearEccErrorCounts = libnvml.DeviceClearEccErrorCounts + DeviceClearFieldValues = libnvml.DeviceClearFieldValues + DeviceCreateGpuInstance = libnvml.DeviceCreateGpuInstance + DeviceCreateGpuInstanceWithPlacement = libnvml.DeviceCreateGpuInstanceWithPlacement + DeviceDiscoverGpus = libnvml.DeviceDiscoverGpus + DeviceFreezeNvLinkUtilizationCounter = libnvml.DeviceFreezeNvLinkUtilizationCounter + DeviceGetAPIRestriction = libnvml.DeviceGetAPIRestriction + DeviceGetAccountingBufferSize = libnvml.DeviceGetAccountingBufferSize + DeviceGetAccountingMode = libnvml.DeviceGetAccountingMode + DeviceGetAccountingPids = libnvml.DeviceGetAccountingPids + DeviceGetAccountingStats = libnvml.DeviceGetAccountingStats + DeviceGetActiveVgpus = libnvml.DeviceGetActiveVgpus + DeviceGetAdaptiveClockInfoStatus = libnvml.DeviceGetAdaptiveClockInfoStatus + DeviceGetApplicationsClock = libnvml.DeviceGetApplicationsClock + DeviceGetArchitecture = libnvml.DeviceGetArchitecture + DeviceGetAttributes = libnvml.DeviceGetAttributes + DeviceGetAutoBoostedClocksEnabled = libnvml.DeviceGetAutoBoostedClocksEnabled + DeviceGetBAR1MemoryInfo = libnvml.DeviceGetBAR1MemoryInfo + DeviceGetBoardId = libnvml.DeviceGetBoardId + DeviceGetBoardPartNumber = libnvml.DeviceGetBoardPartNumber + DeviceGetBrand = libnvml.DeviceGetBrand + DeviceGetBridgeChipInfo = libnvml.DeviceGetBridgeChipInfo + DeviceGetBusType = libnvml.DeviceGetBusType + DeviceGetC2cModeInfoV = libnvml.DeviceGetC2cModeInfoV + DeviceGetClkMonStatus = libnvml.DeviceGetClkMonStatus + DeviceGetClock = libnvml.DeviceGetClock + DeviceGetClockInfo = libnvml.DeviceGetClockInfo + DeviceGetComputeInstanceId = libnvml.DeviceGetComputeInstanceId + DeviceGetComputeMode = libnvml.DeviceGetComputeMode + DeviceGetComputeRunningProcesses = libnvml.DeviceGetComputeRunningProcesses + DeviceGetConfComputeGpuAttestationReport = libnvml.DeviceGetConfComputeGpuAttestationReport + DeviceGetConfComputeGpuCertificate = libnvml.DeviceGetConfComputeGpuCertificate + DeviceGetConfComputeMemSizeInfo = libnvml.DeviceGetConfComputeMemSizeInfo + DeviceGetConfComputeProtectedMemoryUsage = libnvml.DeviceGetConfComputeProtectedMemoryUsage + DeviceGetCount = libnvml.DeviceGetCount + DeviceGetCpuAffinity = libnvml.DeviceGetCpuAffinity + DeviceGetCpuAffinityWithinScope = libnvml.DeviceGetCpuAffinityWithinScope + DeviceGetCreatableVgpus = libnvml.DeviceGetCreatableVgpus + DeviceGetCudaComputeCapability = libnvml.DeviceGetCudaComputeCapability + DeviceGetCurrPcieLinkGeneration = libnvml.DeviceGetCurrPcieLinkGeneration + DeviceGetCurrPcieLinkWidth = libnvml.DeviceGetCurrPcieLinkWidth + DeviceGetCurrentClocksEventReasons = libnvml.DeviceGetCurrentClocksEventReasons + DeviceGetCurrentClocksThrottleReasons = libnvml.DeviceGetCurrentClocksThrottleReasons + DeviceGetDecoderUtilization = libnvml.DeviceGetDecoderUtilization + DeviceGetDefaultApplicationsClock = libnvml.DeviceGetDefaultApplicationsClock + DeviceGetDefaultEccMode = libnvml.DeviceGetDefaultEccMode + DeviceGetDetailedEccErrors = libnvml.DeviceGetDetailedEccErrors + DeviceGetDeviceHandleFromMigDeviceHandle = libnvml.DeviceGetDeviceHandleFromMigDeviceHandle + DeviceGetDisplayActive = libnvml.DeviceGetDisplayActive + DeviceGetDisplayMode = libnvml.DeviceGetDisplayMode + DeviceGetDriverModel = 
libnvml.DeviceGetDriverModel + DeviceGetDynamicPstatesInfo = libnvml.DeviceGetDynamicPstatesInfo + DeviceGetEccMode = libnvml.DeviceGetEccMode + DeviceGetEncoderCapacity = libnvml.DeviceGetEncoderCapacity + DeviceGetEncoderSessions = libnvml.DeviceGetEncoderSessions + DeviceGetEncoderStats = libnvml.DeviceGetEncoderStats + DeviceGetEncoderUtilization = libnvml.DeviceGetEncoderUtilization + DeviceGetEnforcedPowerLimit = libnvml.DeviceGetEnforcedPowerLimit + DeviceGetFBCSessions = libnvml.DeviceGetFBCSessions + DeviceGetFBCStats = libnvml.DeviceGetFBCStats + DeviceGetFanControlPolicy_v2 = libnvml.DeviceGetFanControlPolicy_v2 + DeviceGetFanSpeed = libnvml.DeviceGetFanSpeed + DeviceGetFanSpeed_v2 = libnvml.DeviceGetFanSpeed_v2 + DeviceGetFieldValues = libnvml.DeviceGetFieldValues + DeviceGetGpcClkMinMaxVfOffset = libnvml.DeviceGetGpcClkMinMaxVfOffset + DeviceGetGpcClkVfOffset = libnvml.DeviceGetGpcClkVfOffset + DeviceGetGpuFabricInfo = libnvml.DeviceGetGpuFabricInfo + DeviceGetGpuFabricInfoV = libnvml.DeviceGetGpuFabricInfoV + DeviceGetGpuInstanceById = libnvml.DeviceGetGpuInstanceById + DeviceGetGpuInstanceId = libnvml.DeviceGetGpuInstanceId + DeviceGetGpuInstancePossiblePlacements = libnvml.DeviceGetGpuInstancePossiblePlacements + DeviceGetGpuInstanceProfileInfo = libnvml.DeviceGetGpuInstanceProfileInfo + DeviceGetGpuInstanceProfileInfoV = libnvml.DeviceGetGpuInstanceProfileInfoV + DeviceGetGpuInstanceRemainingCapacity = libnvml.DeviceGetGpuInstanceRemainingCapacity + DeviceGetGpuInstances = libnvml.DeviceGetGpuInstances + DeviceGetGpuMaxPcieLinkGeneration = libnvml.DeviceGetGpuMaxPcieLinkGeneration + DeviceGetGpuOperationMode = libnvml.DeviceGetGpuOperationMode + DeviceGetGraphicsRunningProcesses = libnvml.DeviceGetGraphicsRunningProcesses + DeviceGetGridLicensableFeatures = libnvml.DeviceGetGridLicensableFeatures + DeviceGetGspFirmwareMode = libnvml.DeviceGetGspFirmwareMode + DeviceGetGspFirmwareVersion = libnvml.DeviceGetGspFirmwareVersion + DeviceGetHandleByIndex = libnvml.DeviceGetHandleByIndex + DeviceGetHandleByPciBusId = libnvml.DeviceGetHandleByPciBusId + DeviceGetHandleBySerial = libnvml.DeviceGetHandleBySerial + DeviceGetHandleByUUID = libnvml.DeviceGetHandleByUUID + DeviceGetHostVgpuMode = libnvml.DeviceGetHostVgpuMode + DeviceGetIndex = libnvml.DeviceGetIndex + DeviceGetInforomConfigurationChecksum = libnvml.DeviceGetInforomConfigurationChecksum + DeviceGetInforomImageVersion = libnvml.DeviceGetInforomImageVersion + DeviceGetInforomVersion = libnvml.DeviceGetInforomVersion + DeviceGetIrqNum = libnvml.DeviceGetIrqNum + DeviceGetJpgUtilization = libnvml.DeviceGetJpgUtilization + DeviceGetLastBBXFlushTime = libnvml.DeviceGetLastBBXFlushTime + DeviceGetMPSComputeRunningProcesses = libnvml.DeviceGetMPSComputeRunningProcesses + DeviceGetMaxClockInfo = libnvml.DeviceGetMaxClockInfo + DeviceGetMaxCustomerBoostClock = libnvml.DeviceGetMaxCustomerBoostClock + DeviceGetMaxMigDeviceCount = libnvml.DeviceGetMaxMigDeviceCount + DeviceGetMaxPcieLinkGeneration = libnvml.DeviceGetMaxPcieLinkGeneration + DeviceGetMaxPcieLinkWidth = libnvml.DeviceGetMaxPcieLinkWidth + DeviceGetMemClkMinMaxVfOffset = libnvml.DeviceGetMemClkMinMaxVfOffset + DeviceGetMemClkVfOffset = libnvml.DeviceGetMemClkVfOffset + DeviceGetMemoryAffinity = libnvml.DeviceGetMemoryAffinity + DeviceGetMemoryBusWidth = libnvml.DeviceGetMemoryBusWidth + DeviceGetMemoryErrorCounter = libnvml.DeviceGetMemoryErrorCounter + DeviceGetMemoryInfo = libnvml.DeviceGetMemoryInfo + DeviceGetMemoryInfo_v2 = libnvml.DeviceGetMemoryInfo_v2 + 
DeviceGetMigDeviceHandleByIndex = libnvml.DeviceGetMigDeviceHandleByIndex + DeviceGetMigMode = libnvml.DeviceGetMigMode + DeviceGetMinMaxClockOfPState = libnvml.DeviceGetMinMaxClockOfPState + DeviceGetMinMaxFanSpeed = libnvml.DeviceGetMinMaxFanSpeed + DeviceGetMinorNumber = libnvml.DeviceGetMinorNumber + DeviceGetModuleId = libnvml.DeviceGetModuleId + DeviceGetMultiGpuBoard = libnvml.DeviceGetMultiGpuBoard + DeviceGetName = libnvml.DeviceGetName + DeviceGetNumFans = libnvml.DeviceGetNumFans + DeviceGetNumGpuCores = libnvml.DeviceGetNumGpuCores + DeviceGetNumaNodeId = libnvml.DeviceGetNumaNodeId + DeviceGetNvLinkCapability = libnvml.DeviceGetNvLinkCapability + DeviceGetNvLinkErrorCounter = libnvml.DeviceGetNvLinkErrorCounter + DeviceGetNvLinkRemoteDeviceType = libnvml.DeviceGetNvLinkRemoteDeviceType + DeviceGetNvLinkRemotePciInfo = libnvml.DeviceGetNvLinkRemotePciInfo + DeviceGetNvLinkState = libnvml.DeviceGetNvLinkState + DeviceGetNvLinkUtilizationControl = libnvml.DeviceGetNvLinkUtilizationControl + DeviceGetNvLinkUtilizationCounter = libnvml.DeviceGetNvLinkUtilizationCounter + DeviceGetNvLinkVersion = libnvml.DeviceGetNvLinkVersion + DeviceGetOfaUtilization = libnvml.DeviceGetOfaUtilization + DeviceGetP2PStatus = libnvml.DeviceGetP2PStatus + DeviceGetPciInfo = libnvml.DeviceGetPciInfo + DeviceGetPciInfoExt = libnvml.DeviceGetPciInfoExt + DeviceGetPcieLinkMaxSpeed = libnvml.DeviceGetPcieLinkMaxSpeed + DeviceGetPcieReplayCounter = libnvml.DeviceGetPcieReplayCounter + DeviceGetPcieSpeed = libnvml.DeviceGetPcieSpeed + DeviceGetPcieThroughput = libnvml.DeviceGetPcieThroughput + DeviceGetPerformanceState = libnvml.DeviceGetPerformanceState + DeviceGetPersistenceMode = libnvml.DeviceGetPersistenceMode + DeviceGetPgpuMetadataString = libnvml.DeviceGetPgpuMetadataString + DeviceGetPowerManagementDefaultLimit = libnvml.DeviceGetPowerManagementDefaultLimit + DeviceGetPowerManagementLimit = libnvml.DeviceGetPowerManagementLimit + DeviceGetPowerManagementLimitConstraints = libnvml.DeviceGetPowerManagementLimitConstraints + DeviceGetPowerManagementMode = libnvml.DeviceGetPowerManagementMode + DeviceGetPowerSource = libnvml.DeviceGetPowerSource + DeviceGetPowerState = libnvml.DeviceGetPowerState + DeviceGetPowerUsage = libnvml.DeviceGetPowerUsage + DeviceGetProcessUtilization = libnvml.DeviceGetProcessUtilization + DeviceGetProcessesUtilizationInfo = libnvml.DeviceGetProcessesUtilizationInfo + DeviceGetRemappedRows = libnvml.DeviceGetRemappedRows + DeviceGetRetiredPages = libnvml.DeviceGetRetiredPages + DeviceGetRetiredPagesPendingStatus = libnvml.DeviceGetRetiredPagesPendingStatus + DeviceGetRetiredPages_v2 = libnvml.DeviceGetRetiredPages_v2 + DeviceGetRowRemapperHistogram = libnvml.DeviceGetRowRemapperHistogram + DeviceGetRunningProcessDetailList = libnvml.DeviceGetRunningProcessDetailList + DeviceGetSamples = libnvml.DeviceGetSamples + DeviceGetSerial = libnvml.DeviceGetSerial + DeviceGetSramEccErrorStatus = libnvml.DeviceGetSramEccErrorStatus + DeviceGetSupportedClocksEventReasons = libnvml.DeviceGetSupportedClocksEventReasons + DeviceGetSupportedClocksThrottleReasons = libnvml.DeviceGetSupportedClocksThrottleReasons + DeviceGetSupportedEventTypes = libnvml.DeviceGetSupportedEventTypes + DeviceGetSupportedGraphicsClocks = libnvml.DeviceGetSupportedGraphicsClocks + DeviceGetSupportedMemoryClocks = libnvml.DeviceGetSupportedMemoryClocks + DeviceGetSupportedPerformanceStates = libnvml.DeviceGetSupportedPerformanceStates + DeviceGetSupportedVgpus = libnvml.DeviceGetSupportedVgpus + 
DeviceGetTargetFanSpeed = libnvml.DeviceGetTargetFanSpeed + DeviceGetTemperature = libnvml.DeviceGetTemperature + DeviceGetTemperatureThreshold = libnvml.DeviceGetTemperatureThreshold + DeviceGetThermalSettings = libnvml.DeviceGetThermalSettings + DeviceGetTopologyCommonAncestor = libnvml.DeviceGetTopologyCommonAncestor + DeviceGetTopologyNearestGpus = libnvml.DeviceGetTopologyNearestGpus + DeviceGetTotalEccErrors = libnvml.DeviceGetTotalEccErrors + DeviceGetTotalEnergyConsumption = libnvml.DeviceGetTotalEnergyConsumption + DeviceGetUUID = libnvml.DeviceGetUUID + DeviceGetUtilizationRates = libnvml.DeviceGetUtilizationRates + DeviceGetVbiosVersion = libnvml.DeviceGetVbiosVersion + DeviceGetVgpuCapabilities = libnvml.DeviceGetVgpuCapabilities + DeviceGetVgpuHeterogeneousMode = libnvml.DeviceGetVgpuHeterogeneousMode + DeviceGetVgpuInstancesUtilizationInfo = libnvml.DeviceGetVgpuInstancesUtilizationInfo + DeviceGetVgpuMetadata = libnvml.DeviceGetVgpuMetadata + DeviceGetVgpuProcessUtilization = libnvml.DeviceGetVgpuProcessUtilization + DeviceGetVgpuProcessesUtilizationInfo = libnvml.DeviceGetVgpuProcessesUtilizationInfo + DeviceGetVgpuSchedulerCapabilities = libnvml.DeviceGetVgpuSchedulerCapabilities + DeviceGetVgpuSchedulerLog = libnvml.DeviceGetVgpuSchedulerLog + DeviceGetVgpuSchedulerState = libnvml.DeviceGetVgpuSchedulerState + DeviceGetVgpuTypeCreatablePlacements = libnvml.DeviceGetVgpuTypeCreatablePlacements + DeviceGetVgpuTypeSupportedPlacements = libnvml.DeviceGetVgpuTypeSupportedPlacements + DeviceGetVgpuUtilization = libnvml.DeviceGetVgpuUtilization + DeviceGetViolationStatus = libnvml.DeviceGetViolationStatus + DeviceGetVirtualizationMode = libnvml.DeviceGetVirtualizationMode + DeviceIsMigDeviceHandle = libnvml.DeviceIsMigDeviceHandle + DeviceModifyDrainState = libnvml.DeviceModifyDrainState + DeviceOnSameBoard = libnvml.DeviceOnSameBoard + DeviceQueryDrainState = libnvml.DeviceQueryDrainState + DeviceRegisterEvents = libnvml.DeviceRegisterEvents + DeviceRemoveGpu = libnvml.DeviceRemoveGpu + DeviceRemoveGpu_v2 = libnvml.DeviceRemoveGpu_v2 + DeviceResetApplicationsClocks = libnvml.DeviceResetApplicationsClocks + DeviceResetGpuLockedClocks = libnvml.DeviceResetGpuLockedClocks + DeviceResetMemoryLockedClocks = libnvml.DeviceResetMemoryLockedClocks + DeviceResetNvLinkErrorCounters = libnvml.DeviceResetNvLinkErrorCounters + DeviceResetNvLinkUtilizationCounter = libnvml.DeviceResetNvLinkUtilizationCounter + DeviceSetAPIRestriction = libnvml.DeviceSetAPIRestriction + DeviceSetAccountingMode = libnvml.DeviceSetAccountingMode + DeviceSetApplicationsClocks = libnvml.DeviceSetApplicationsClocks + DeviceSetAutoBoostedClocksEnabled = libnvml.DeviceSetAutoBoostedClocksEnabled + DeviceSetComputeMode = libnvml.DeviceSetComputeMode + DeviceSetConfComputeUnprotectedMemSize = libnvml.DeviceSetConfComputeUnprotectedMemSize + DeviceSetCpuAffinity = libnvml.DeviceSetCpuAffinity + DeviceSetDefaultAutoBoostedClocksEnabled = libnvml.DeviceSetDefaultAutoBoostedClocksEnabled + DeviceSetDefaultFanSpeed_v2 = libnvml.DeviceSetDefaultFanSpeed_v2 + DeviceSetDriverModel = libnvml.DeviceSetDriverModel + DeviceSetEccMode = libnvml.DeviceSetEccMode + DeviceSetFanControlPolicy = libnvml.DeviceSetFanControlPolicy + DeviceSetFanSpeed_v2 = libnvml.DeviceSetFanSpeed_v2 + DeviceSetGpcClkVfOffset = libnvml.DeviceSetGpcClkVfOffset + DeviceSetGpuLockedClocks = libnvml.DeviceSetGpuLockedClocks + DeviceSetGpuOperationMode = libnvml.DeviceSetGpuOperationMode + DeviceSetMemClkVfOffset = libnvml.DeviceSetMemClkVfOffset + 
DeviceSetMemoryLockedClocks = libnvml.DeviceSetMemoryLockedClocks + DeviceSetMigMode = libnvml.DeviceSetMigMode + DeviceSetNvLinkDeviceLowPowerThreshold = libnvml.DeviceSetNvLinkDeviceLowPowerThreshold + DeviceSetNvLinkUtilizationControl = libnvml.DeviceSetNvLinkUtilizationControl + DeviceSetPersistenceMode = libnvml.DeviceSetPersistenceMode + DeviceSetPowerManagementLimit = libnvml.DeviceSetPowerManagementLimit + DeviceSetPowerManagementLimit_v2 = libnvml.DeviceSetPowerManagementLimit_v2 + DeviceSetTemperatureThreshold = libnvml.DeviceSetTemperatureThreshold + DeviceSetVgpuCapabilities = libnvml.DeviceSetVgpuCapabilities + DeviceSetVgpuHeterogeneousMode = libnvml.DeviceSetVgpuHeterogeneousMode + DeviceSetVgpuSchedulerState = libnvml.DeviceSetVgpuSchedulerState + DeviceSetVirtualizationMode = libnvml.DeviceSetVirtualizationMode + DeviceValidateInforom = libnvml.DeviceValidateInforom + ErrorString = libnvml.ErrorString + EventSetCreate = libnvml.EventSetCreate + EventSetFree = libnvml.EventSetFree + EventSetWait = libnvml.EventSetWait + Extensions = libnvml.Extensions + GetExcludedDeviceCount = libnvml.GetExcludedDeviceCount + GetExcludedDeviceInfoByIndex = libnvml.GetExcludedDeviceInfoByIndex + GetVgpuCompatibility = libnvml.GetVgpuCompatibility + GetVgpuDriverCapabilities = libnvml.GetVgpuDriverCapabilities + GetVgpuVersion = libnvml.GetVgpuVersion + GpmMetricsGet = libnvml.GpmMetricsGet + GpmMetricsGetV = libnvml.GpmMetricsGetV + GpmMigSampleGet = libnvml.GpmMigSampleGet + GpmQueryDeviceSupport = libnvml.GpmQueryDeviceSupport + GpmQueryDeviceSupportV = libnvml.GpmQueryDeviceSupportV + GpmQueryIfStreamingEnabled = libnvml.GpmQueryIfStreamingEnabled + GpmSampleAlloc = libnvml.GpmSampleAlloc + GpmSampleFree = libnvml.GpmSampleFree + GpmSampleGet = libnvml.GpmSampleGet + GpmSetStreamingEnabled = libnvml.GpmSetStreamingEnabled + GpuInstanceCreateComputeInstance = libnvml.GpuInstanceCreateComputeInstance + GpuInstanceCreateComputeInstanceWithPlacement = libnvml.GpuInstanceCreateComputeInstanceWithPlacement + GpuInstanceDestroy = libnvml.GpuInstanceDestroy + GpuInstanceGetComputeInstanceById = libnvml.GpuInstanceGetComputeInstanceById + GpuInstanceGetComputeInstancePossiblePlacements = libnvml.GpuInstanceGetComputeInstancePossiblePlacements + GpuInstanceGetComputeInstanceProfileInfo = libnvml.GpuInstanceGetComputeInstanceProfileInfo + GpuInstanceGetComputeInstanceProfileInfoV = libnvml.GpuInstanceGetComputeInstanceProfileInfoV + GpuInstanceGetComputeInstanceRemainingCapacity = libnvml.GpuInstanceGetComputeInstanceRemainingCapacity + GpuInstanceGetComputeInstances = libnvml.GpuInstanceGetComputeInstances + GpuInstanceGetInfo = libnvml.GpuInstanceGetInfo + Init = libnvml.Init + InitWithFlags = libnvml.InitWithFlags + SetVgpuVersion = libnvml.SetVgpuVersion + Shutdown = libnvml.Shutdown + SystemGetConfComputeCapabilities = libnvml.SystemGetConfComputeCapabilities + SystemGetConfComputeKeyRotationThresholdInfo = libnvml.SystemGetConfComputeKeyRotationThresholdInfo + SystemGetConfComputeSettings = libnvml.SystemGetConfComputeSettings + SystemGetCudaDriverVersion = libnvml.SystemGetCudaDriverVersion + SystemGetCudaDriverVersion_v2 = libnvml.SystemGetCudaDriverVersion_v2 + SystemGetDriverVersion = libnvml.SystemGetDriverVersion + SystemGetHicVersion = libnvml.SystemGetHicVersion + SystemGetNVMLVersion = libnvml.SystemGetNVMLVersion + SystemGetProcessName = libnvml.SystemGetProcessName + SystemGetTopologyGpuSet = libnvml.SystemGetTopologyGpuSet + SystemSetConfComputeKeyRotationThresholdInfo = 
libnvml.SystemSetConfComputeKeyRotationThresholdInfo + UnitGetCount = libnvml.UnitGetCount + UnitGetDevices = libnvml.UnitGetDevices + UnitGetFanSpeedInfo = libnvml.UnitGetFanSpeedInfo + UnitGetHandleByIndex = libnvml.UnitGetHandleByIndex + UnitGetLedState = libnvml.UnitGetLedState + UnitGetPsuInfo = libnvml.UnitGetPsuInfo + UnitGetTemperature = libnvml.UnitGetTemperature + UnitGetUnitInfo = libnvml.UnitGetUnitInfo + UnitSetLedState = libnvml.UnitSetLedState + VgpuInstanceClearAccountingPids = libnvml.VgpuInstanceClearAccountingPids + VgpuInstanceGetAccountingMode = libnvml.VgpuInstanceGetAccountingMode + VgpuInstanceGetAccountingPids = libnvml.VgpuInstanceGetAccountingPids + VgpuInstanceGetAccountingStats = libnvml.VgpuInstanceGetAccountingStats + VgpuInstanceGetEccMode = libnvml.VgpuInstanceGetEccMode + VgpuInstanceGetEncoderCapacity = libnvml.VgpuInstanceGetEncoderCapacity + VgpuInstanceGetEncoderSessions = libnvml.VgpuInstanceGetEncoderSessions + VgpuInstanceGetEncoderStats = libnvml.VgpuInstanceGetEncoderStats + VgpuInstanceGetFBCSessions = libnvml.VgpuInstanceGetFBCSessions + VgpuInstanceGetFBCStats = libnvml.VgpuInstanceGetFBCStats + VgpuInstanceGetFbUsage = libnvml.VgpuInstanceGetFbUsage + VgpuInstanceGetFrameRateLimit = libnvml.VgpuInstanceGetFrameRateLimit + VgpuInstanceGetGpuInstanceId = libnvml.VgpuInstanceGetGpuInstanceId + VgpuInstanceGetGpuPciId = libnvml.VgpuInstanceGetGpuPciId + VgpuInstanceGetLicenseInfo = libnvml.VgpuInstanceGetLicenseInfo + VgpuInstanceGetLicenseStatus = libnvml.VgpuInstanceGetLicenseStatus + VgpuInstanceGetMdevUUID = libnvml.VgpuInstanceGetMdevUUID + VgpuInstanceGetMetadata = libnvml.VgpuInstanceGetMetadata + VgpuInstanceGetType = libnvml.VgpuInstanceGetType + VgpuInstanceGetUUID = libnvml.VgpuInstanceGetUUID + VgpuInstanceGetVmDriverVersion = libnvml.VgpuInstanceGetVmDriverVersion + VgpuInstanceGetVmID = libnvml.VgpuInstanceGetVmID + VgpuInstanceSetEncoderCapacity = libnvml.VgpuInstanceSetEncoderCapacity + VgpuTypeGetCapabilities = libnvml.VgpuTypeGetCapabilities + VgpuTypeGetClass = libnvml.VgpuTypeGetClass + VgpuTypeGetDeviceID = libnvml.VgpuTypeGetDeviceID + VgpuTypeGetFrameRateLimit = libnvml.VgpuTypeGetFrameRateLimit + VgpuTypeGetFramebufferSize = libnvml.VgpuTypeGetFramebufferSize + VgpuTypeGetGpuInstanceProfileId = libnvml.VgpuTypeGetGpuInstanceProfileId + VgpuTypeGetLicense = libnvml.VgpuTypeGetLicense + VgpuTypeGetMaxInstances = libnvml.VgpuTypeGetMaxInstances + VgpuTypeGetMaxInstancesPerVm = libnvml.VgpuTypeGetMaxInstancesPerVm + VgpuTypeGetName = libnvml.VgpuTypeGetName + VgpuTypeGetNumDisplayHeads = libnvml.VgpuTypeGetNumDisplayHeads + VgpuTypeGetResolution = libnvml.VgpuTypeGetResolution +) + +// Interface represents the interface for the library type. +// +//go:generate moq -out mock/interface.go -pkg mock . 
Interface:Interface +type Interface interface { + ComputeInstanceDestroy(ComputeInstance) Return + ComputeInstanceGetInfo(ComputeInstance) (ComputeInstanceInfo, Return) + DeviceClearAccountingPids(Device) Return + DeviceClearCpuAffinity(Device) Return + DeviceClearEccErrorCounts(Device, EccCounterType) Return + DeviceClearFieldValues(Device, []FieldValue) Return + DeviceCreateGpuInstance(Device, *GpuInstanceProfileInfo) (GpuInstance, Return) + DeviceCreateGpuInstanceWithPlacement(Device, *GpuInstanceProfileInfo, *GpuInstancePlacement) (GpuInstance, Return) + DeviceDiscoverGpus() (PciInfo, Return) + DeviceFreezeNvLinkUtilizationCounter(Device, int, int, EnableState) Return + DeviceGetAPIRestriction(Device, RestrictedAPI) (EnableState, Return) + DeviceGetAccountingBufferSize(Device) (int, Return) + DeviceGetAccountingMode(Device) (EnableState, Return) + DeviceGetAccountingPids(Device) ([]int, Return) + DeviceGetAccountingStats(Device, uint32) (AccountingStats, Return) + DeviceGetActiveVgpus(Device) ([]VgpuInstance, Return) + DeviceGetAdaptiveClockInfoStatus(Device) (uint32, Return) + DeviceGetApplicationsClock(Device, ClockType) (uint32, Return) + DeviceGetArchitecture(Device) (DeviceArchitecture, Return) + DeviceGetAttributes(Device) (DeviceAttributes, Return) + DeviceGetAutoBoostedClocksEnabled(Device) (EnableState, EnableState, Return) + DeviceGetBAR1MemoryInfo(Device) (BAR1Memory, Return) + DeviceGetBoardId(Device) (uint32, Return) + DeviceGetBoardPartNumber(Device) (string, Return) + DeviceGetBrand(Device) (BrandType, Return) + DeviceGetBridgeChipInfo(Device) (BridgeChipHierarchy, Return) + DeviceGetBusType(Device) (BusType, Return) + DeviceGetC2cModeInfoV(Device) C2cModeInfoHandler + DeviceGetClkMonStatus(Device) (ClkMonStatus, Return) + DeviceGetClock(Device, ClockType, ClockId) (uint32, Return) + DeviceGetClockInfo(Device, ClockType) (uint32, Return) + DeviceGetComputeInstanceId(Device) (int, Return) + DeviceGetComputeMode(Device) (ComputeMode, Return) + DeviceGetComputeRunningProcesses(Device) ([]ProcessInfo, Return) + DeviceGetConfComputeGpuAttestationReport(Device) (ConfComputeGpuAttestationReport, Return) + DeviceGetConfComputeGpuCertificate(Device) (ConfComputeGpuCertificate, Return) + DeviceGetConfComputeMemSizeInfo(Device) (ConfComputeMemSizeInfo, Return) + DeviceGetConfComputeProtectedMemoryUsage(Device) (Memory, Return) + DeviceGetCount() (int, Return) + DeviceGetCpuAffinity(Device, int) ([]uint, Return) + DeviceGetCpuAffinityWithinScope(Device, int, AffinityScope) ([]uint, Return) + DeviceGetCreatableVgpus(Device) ([]VgpuTypeId, Return) + DeviceGetCudaComputeCapability(Device) (int, int, Return) + DeviceGetCurrPcieLinkGeneration(Device) (int, Return) + DeviceGetCurrPcieLinkWidth(Device) (int, Return) + DeviceGetCurrentClocksEventReasons(Device) (uint64, Return) + DeviceGetCurrentClocksThrottleReasons(Device) (uint64, Return) + DeviceGetDecoderUtilization(Device) (uint32, uint32, Return) + DeviceGetDefaultApplicationsClock(Device, ClockType) (uint32, Return) + DeviceGetDefaultEccMode(Device) (EnableState, Return) + DeviceGetDetailedEccErrors(Device, MemoryErrorType, EccCounterType) (EccErrorCounts, Return) + DeviceGetDeviceHandleFromMigDeviceHandle(Device) (Device, Return) + DeviceGetDisplayActive(Device) (EnableState, Return) + DeviceGetDisplayMode(Device) (EnableState, Return) + DeviceGetDriverModel(Device) (DriverModel, DriverModel, Return) + DeviceGetDynamicPstatesInfo(Device) (GpuDynamicPstatesInfo, Return) + DeviceGetEccMode(Device) (EnableState, EnableState, Return) + 
DeviceGetEncoderCapacity(Device, EncoderType) (int, Return) + DeviceGetEncoderSessions(Device) ([]EncoderSessionInfo, Return) + DeviceGetEncoderStats(Device) (int, uint32, uint32, Return) + DeviceGetEncoderUtilization(Device) (uint32, uint32, Return) + DeviceGetEnforcedPowerLimit(Device) (uint32, Return) + DeviceGetFBCSessions(Device) ([]FBCSessionInfo, Return) + DeviceGetFBCStats(Device) (FBCStats, Return) + DeviceGetFanControlPolicy_v2(Device, int) (FanControlPolicy, Return) + DeviceGetFanSpeed(Device) (uint32, Return) + DeviceGetFanSpeed_v2(Device, int) (uint32, Return) + DeviceGetFieldValues(Device, []FieldValue) Return + DeviceGetGpcClkMinMaxVfOffset(Device) (int, int, Return) + DeviceGetGpcClkVfOffset(Device) (int, Return) + DeviceGetGpuFabricInfo(Device) (GpuFabricInfo, Return) + DeviceGetGpuFabricInfoV(Device) GpuFabricInfoHandler + DeviceGetGpuInstanceById(Device, int) (GpuInstance, Return) + DeviceGetGpuInstanceId(Device) (int, Return) + DeviceGetGpuInstancePossiblePlacements(Device, *GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) + DeviceGetGpuInstanceProfileInfo(Device, int) (GpuInstanceProfileInfo, Return) + DeviceGetGpuInstanceProfileInfoV(Device, int) GpuInstanceProfileInfoHandler + DeviceGetGpuInstanceRemainingCapacity(Device, *GpuInstanceProfileInfo) (int, Return) + DeviceGetGpuInstances(Device, *GpuInstanceProfileInfo) ([]GpuInstance, Return) + DeviceGetGpuMaxPcieLinkGeneration(Device) (int, Return) + DeviceGetGpuOperationMode(Device) (GpuOperationMode, GpuOperationMode, Return) + DeviceGetGraphicsRunningProcesses(Device) ([]ProcessInfo, Return) + DeviceGetGridLicensableFeatures(Device) (GridLicensableFeatures, Return) + DeviceGetGspFirmwareMode(Device) (bool, bool, Return) + DeviceGetGspFirmwareVersion(Device) (string, Return) + DeviceGetHandleByIndex(int) (Device, Return) + DeviceGetHandleByPciBusId(string) (Device, Return) + DeviceGetHandleBySerial(string) (Device, Return) + DeviceGetHandleByUUID(string) (Device, Return) + DeviceGetHostVgpuMode(Device) (HostVgpuMode, Return) + DeviceGetIndex(Device) (int, Return) + DeviceGetInforomConfigurationChecksum(Device) (uint32, Return) + DeviceGetInforomImageVersion(Device) (string, Return) + DeviceGetInforomVersion(Device, InforomObject) (string, Return) + DeviceGetIrqNum(Device) (int, Return) + DeviceGetJpgUtilization(Device) (uint32, uint32, Return) + DeviceGetLastBBXFlushTime(Device) (uint64, uint, Return) + DeviceGetMPSComputeRunningProcesses(Device) ([]ProcessInfo, Return) + DeviceGetMaxClockInfo(Device, ClockType) (uint32, Return) + DeviceGetMaxCustomerBoostClock(Device, ClockType) (uint32, Return) + DeviceGetMaxMigDeviceCount(Device) (int, Return) + DeviceGetMaxPcieLinkGeneration(Device) (int, Return) + DeviceGetMaxPcieLinkWidth(Device) (int, Return) + DeviceGetMemClkMinMaxVfOffset(Device) (int, int, Return) + DeviceGetMemClkVfOffset(Device) (int, Return) + DeviceGetMemoryAffinity(Device, int, AffinityScope) ([]uint, Return) + DeviceGetMemoryBusWidth(Device) (uint32, Return) + DeviceGetMemoryErrorCounter(Device, MemoryErrorType, EccCounterType, MemoryLocation) (uint64, Return) + DeviceGetMemoryInfo(Device) (Memory, Return) + DeviceGetMemoryInfo_v2(Device) (Memory_v2, Return) + DeviceGetMigDeviceHandleByIndex(Device, int) (Device, Return) + DeviceGetMigMode(Device) (int, int, Return) + DeviceGetMinMaxClockOfPState(Device, ClockType, Pstates) (uint32, uint32, Return) + DeviceGetMinMaxFanSpeed(Device) (int, int, Return) + DeviceGetMinorNumber(Device) (int, Return) + DeviceGetModuleId(Device) (int, Return) + 
DeviceGetMultiGpuBoard(Device) (int, Return) + DeviceGetName(Device) (string, Return) + DeviceGetNumFans(Device) (int, Return) + DeviceGetNumGpuCores(Device) (int, Return) + DeviceGetNumaNodeId(Device) (int, Return) + DeviceGetNvLinkCapability(Device, int, NvLinkCapability) (uint32, Return) + DeviceGetNvLinkErrorCounter(Device, int, NvLinkErrorCounter) (uint64, Return) + DeviceGetNvLinkRemoteDeviceType(Device, int) (IntNvLinkDeviceType, Return) + DeviceGetNvLinkRemotePciInfo(Device, int) (PciInfo, Return) + DeviceGetNvLinkState(Device, int) (EnableState, Return) + DeviceGetNvLinkUtilizationControl(Device, int, int) (NvLinkUtilizationControl, Return) + DeviceGetNvLinkUtilizationCounter(Device, int, int) (uint64, uint64, Return) + DeviceGetNvLinkVersion(Device, int) (uint32, Return) + DeviceGetOfaUtilization(Device) (uint32, uint32, Return) + DeviceGetP2PStatus(Device, Device, GpuP2PCapsIndex) (GpuP2PStatus, Return) + DeviceGetPciInfo(Device) (PciInfo, Return) + DeviceGetPciInfoExt(Device) (PciInfoExt, Return) + DeviceGetPcieLinkMaxSpeed(Device) (uint32, Return) + DeviceGetPcieReplayCounter(Device) (int, Return) + DeviceGetPcieSpeed(Device) (int, Return) + DeviceGetPcieThroughput(Device, PcieUtilCounter) (uint32, Return) + DeviceGetPerformanceState(Device) (Pstates, Return) + DeviceGetPersistenceMode(Device) (EnableState, Return) + DeviceGetPgpuMetadataString(Device) (string, Return) + DeviceGetPowerManagementDefaultLimit(Device) (uint32, Return) + DeviceGetPowerManagementLimit(Device) (uint32, Return) + DeviceGetPowerManagementLimitConstraints(Device) (uint32, uint32, Return) + DeviceGetPowerManagementMode(Device) (EnableState, Return) + DeviceGetPowerSource(Device) (PowerSource, Return) + DeviceGetPowerState(Device) (Pstates, Return) + DeviceGetPowerUsage(Device) (uint32, Return) + DeviceGetProcessUtilization(Device, uint64) ([]ProcessUtilizationSample, Return) + DeviceGetProcessesUtilizationInfo(Device) (ProcessesUtilizationInfo, Return) + DeviceGetRemappedRows(Device) (int, int, bool, bool, Return) + DeviceGetRetiredPages(Device, PageRetirementCause) ([]uint64, Return) + DeviceGetRetiredPagesPendingStatus(Device) (EnableState, Return) + DeviceGetRetiredPages_v2(Device, PageRetirementCause) ([]uint64, []uint64, Return) + DeviceGetRowRemapperHistogram(Device) (RowRemapperHistogramValues, Return) + DeviceGetRunningProcessDetailList(Device) (ProcessDetailList, Return) + DeviceGetSamples(Device, SamplingType, uint64) (ValueType, []Sample, Return) + DeviceGetSerial(Device) (string, Return) + DeviceGetSramEccErrorStatus(Device) (EccSramErrorStatus, Return) + DeviceGetSupportedClocksEventReasons(Device) (uint64, Return) + DeviceGetSupportedClocksThrottleReasons(Device) (uint64, Return) + DeviceGetSupportedEventTypes(Device) (uint64, Return) + DeviceGetSupportedGraphicsClocks(Device, int) (int, uint32, Return) + DeviceGetSupportedMemoryClocks(Device) (int, uint32, Return) + DeviceGetSupportedPerformanceStates(Device) ([]Pstates, Return) + DeviceGetSupportedVgpus(Device) ([]VgpuTypeId, Return) + DeviceGetTargetFanSpeed(Device, int) (int, Return) + DeviceGetTemperature(Device, TemperatureSensors) (uint32, Return) + DeviceGetTemperatureThreshold(Device, TemperatureThresholds) (uint32, Return) + DeviceGetThermalSettings(Device, uint32) (GpuThermalSettings, Return) + DeviceGetTopologyCommonAncestor(Device, Device) (GpuTopologyLevel, Return) + DeviceGetTopologyNearestGpus(Device, GpuTopologyLevel) ([]Device, Return) + DeviceGetTotalEccErrors(Device, MemoryErrorType, EccCounterType) (uint64, Return) + 
DeviceGetTotalEnergyConsumption(Device) (uint64, Return) + DeviceGetUUID(Device) (string, Return) + DeviceGetUtilizationRates(Device) (Utilization, Return) + DeviceGetVbiosVersion(Device) (string, Return) + DeviceGetVgpuCapabilities(Device, DeviceVgpuCapability) (bool, Return) + DeviceGetVgpuHeterogeneousMode(Device) (VgpuHeterogeneousMode, Return) + DeviceGetVgpuInstancesUtilizationInfo(Device) (VgpuInstancesUtilizationInfo, Return) + DeviceGetVgpuMetadata(Device) (VgpuPgpuMetadata, Return) + DeviceGetVgpuProcessUtilization(Device, uint64) ([]VgpuProcessUtilizationSample, Return) + DeviceGetVgpuProcessesUtilizationInfo(Device) (VgpuProcessesUtilizationInfo, Return) + DeviceGetVgpuSchedulerCapabilities(Device) (VgpuSchedulerCapabilities, Return) + DeviceGetVgpuSchedulerLog(Device) (VgpuSchedulerLog, Return) + DeviceGetVgpuSchedulerState(Device) (VgpuSchedulerGetState, Return) + DeviceGetVgpuTypeCreatablePlacements(Device, VgpuTypeId) (VgpuPlacementList, Return) + DeviceGetVgpuTypeSupportedPlacements(Device, VgpuTypeId) (VgpuPlacementList, Return) + DeviceGetVgpuUtilization(Device, uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) + DeviceGetViolationStatus(Device, PerfPolicyType) (ViolationTime, Return) + DeviceGetVirtualizationMode(Device) (GpuVirtualizationMode, Return) + DeviceIsMigDeviceHandle(Device) (bool, Return) + DeviceModifyDrainState(*PciInfo, EnableState) Return + DeviceOnSameBoard(Device, Device) (int, Return) + DeviceQueryDrainState(*PciInfo) (EnableState, Return) + DeviceRegisterEvents(Device, uint64, EventSet) Return + DeviceRemoveGpu(*PciInfo) Return + DeviceRemoveGpu_v2(*PciInfo, DetachGpuState, PcieLinkState) Return + DeviceResetApplicationsClocks(Device) Return + DeviceResetGpuLockedClocks(Device) Return + DeviceResetMemoryLockedClocks(Device) Return + DeviceResetNvLinkErrorCounters(Device, int) Return + DeviceResetNvLinkUtilizationCounter(Device, int, int) Return + DeviceSetAPIRestriction(Device, RestrictedAPI, EnableState) Return + DeviceSetAccountingMode(Device, EnableState) Return + DeviceSetApplicationsClocks(Device, uint32, uint32) Return + DeviceSetAutoBoostedClocksEnabled(Device, EnableState) Return + DeviceSetComputeMode(Device, ComputeMode) Return + DeviceSetConfComputeUnprotectedMemSize(Device, uint64) Return + DeviceSetCpuAffinity(Device) Return + DeviceSetDefaultAutoBoostedClocksEnabled(Device, EnableState, uint32) Return + DeviceSetDefaultFanSpeed_v2(Device, int) Return + DeviceSetDriverModel(Device, DriverModel, uint32) Return + DeviceSetEccMode(Device, EnableState) Return + DeviceSetFanControlPolicy(Device, int, FanControlPolicy) Return + DeviceSetFanSpeed_v2(Device, int, int) Return + DeviceSetGpcClkVfOffset(Device, int) Return + DeviceSetGpuLockedClocks(Device, uint32, uint32) Return + DeviceSetGpuOperationMode(Device, GpuOperationMode) Return + DeviceSetMemClkVfOffset(Device, int) Return + DeviceSetMemoryLockedClocks(Device, uint32, uint32) Return + DeviceSetMigMode(Device, int) (Return, Return) + DeviceSetNvLinkDeviceLowPowerThreshold(Device, *NvLinkPowerThres) Return + DeviceSetNvLinkUtilizationControl(Device, int, int, *NvLinkUtilizationControl, bool) Return + DeviceSetPersistenceMode(Device, EnableState) Return + DeviceSetPowerManagementLimit(Device, uint32) Return + DeviceSetPowerManagementLimit_v2(Device, *PowerValue_v2) Return + DeviceSetTemperatureThreshold(Device, TemperatureThresholds, int) Return + DeviceSetVgpuCapabilities(Device, DeviceVgpuCapability, EnableState) Return + DeviceSetVgpuHeterogeneousMode(Device, 
VgpuHeterogeneousMode) Return + DeviceSetVgpuSchedulerState(Device, *VgpuSchedulerSetState) Return + DeviceSetVirtualizationMode(Device, GpuVirtualizationMode) Return + DeviceValidateInforom(Device) Return + ErrorString(Return) string + EventSetCreate() (EventSet, Return) + EventSetFree(EventSet) Return + EventSetWait(EventSet, uint32) (EventData, Return) + Extensions() ExtendedInterface + GetExcludedDeviceCount() (int, Return) + GetExcludedDeviceInfoByIndex(int) (ExcludedDeviceInfo, Return) + GetVgpuCompatibility(*VgpuMetadata, *VgpuPgpuMetadata) (VgpuPgpuCompatibility, Return) + GetVgpuDriverCapabilities(VgpuDriverCapability) (bool, Return) + GetVgpuVersion() (VgpuVersion, VgpuVersion, Return) + GpmMetricsGet(*GpmMetricsGetType) Return + GpmMetricsGetV(*GpmMetricsGetType) GpmMetricsGetVType + GpmMigSampleGet(Device, int, GpmSample) Return + GpmQueryDeviceSupport(Device) (GpmSupport, Return) + GpmQueryDeviceSupportV(Device) GpmSupportV + GpmQueryIfStreamingEnabled(Device) (uint32, Return) + GpmSampleAlloc() (GpmSample, Return) + GpmSampleFree(GpmSample) Return + GpmSampleGet(Device, GpmSample) Return + GpmSetStreamingEnabled(Device, uint32) Return + GpuInstanceCreateComputeInstance(GpuInstance, *ComputeInstanceProfileInfo) (ComputeInstance, Return) + GpuInstanceCreateComputeInstanceWithPlacement(GpuInstance, *ComputeInstanceProfileInfo, *ComputeInstancePlacement) (ComputeInstance, Return) + GpuInstanceDestroy(GpuInstance) Return + GpuInstanceGetComputeInstanceById(GpuInstance, int) (ComputeInstance, Return) + GpuInstanceGetComputeInstancePossiblePlacements(GpuInstance, *ComputeInstanceProfileInfo) ([]ComputeInstancePlacement, Return) + GpuInstanceGetComputeInstanceProfileInfo(GpuInstance, int, int) (ComputeInstanceProfileInfo, Return) + GpuInstanceGetComputeInstanceProfileInfoV(GpuInstance, int, int) ComputeInstanceProfileInfoHandler + GpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance, *ComputeInstanceProfileInfo) (int, Return) + GpuInstanceGetComputeInstances(GpuInstance, *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) + GpuInstanceGetInfo(GpuInstance) (GpuInstanceInfo, Return) + Init() Return + InitWithFlags(uint32) Return + SetVgpuVersion(*VgpuVersion) Return + Shutdown() Return + SystemGetConfComputeCapabilities() (ConfComputeSystemCaps, Return) + SystemGetConfComputeKeyRotationThresholdInfo() (ConfComputeGetKeyRotationThresholdInfo, Return) + SystemGetConfComputeSettings() (SystemConfComputeSettings, Return) + SystemGetCudaDriverVersion() (int, Return) + SystemGetCudaDriverVersion_v2() (int, Return) + SystemGetDriverVersion() (string, Return) + SystemGetHicVersion() ([]HwbcEntry, Return) + SystemGetNVMLVersion() (string, Return) + SystemGetProcessName(int) (string, Return) + SystemGetTopologyGpuSet(int) ([]Device, Return) + SystemSetConfComputeKeyRotationThresholdInfo(ConfComputeSetKeyRotationThresholdInfo) Return + UnitGetCount() (int, Return) + UnitGetDevices(Unit) ([]Device, Return) + UnitGetFanSpeedInfo(Unit) (UnitFanSpeeds, Return) + UnitGetHandleByIndex(int) (Unit, Return) + UnitGetLedState(Unit) (LedState, Return) + UnitGetPsuInfo(Unit) (PSUInfo, Return) + UnitGetTemperature(Unit, int) (uint32, Return) + UnitGetUnitInfo(Unit) (UnitInfo, Return) + UnitSetLedState(Unit, LedColor) Return + VgpuInstanceClearAccountingPids(VgpuInstance) Return + VgpuInstanceGetAccountingMode(VgpuInstance) (EnableState, Return) + VgpuInstanceGetAccountingPids(VgpuInstance) ([]int, Return) + VgpuInstanceGetAccountingStats(VgpuInstance, int) (AccountingStats, Return) + 
VgpuInstanceGetEccMode(VgpuInstance) (EnableState, Return) + VgpuInstanceGetEncoderCapacity(VgpuInstance) (int, Return) + VgpuInstanceGetEncoderSessions(VgpuInstance) (int, EncoderSessionInfo, Return) + VgpuInstanceGetEncoderStats(VgpuInstance) (int, uint32, uint32, Return) + VgpuInstanceGetFBCSessions(VgpuInstance) (int, FBCSessionInfo, Return) + VgpuInstanceGetFBCStats(VgpuInstance) (FBCStats, Return) + VgpuInstanceGetFbUsage(VgpuInstance) (uint64, Return) + VgpuInstanceGetFrameRateLimit(VgpuInstance) (uint32, Return) + VgpuInstanceGetGpuInstanceId(VgpuInstance) (int, Return) + VgpuInstanceGetGpuPciId(VgpuInstance) (string, Return) + VgpuInstanceGetLicenseInfo(VgpuInstance) (VgpuLicenseInfo, Return) + VgpuInstanceGetLicenseStatus(VgpuInstance) (int, Return) + VgpuInstanceGetMdevUUID(VgpuInstance) (string, Return) + VgpuInstanceGetMetadata(VgpuInstance) (VgpuMetadata, Return) + VgpuInstanceGetType(VgpuInstance) (VgpuTypeId, Return) + VgpuInstanceGetUUID(VgpuInstance) (string, Return) + VgpuInstanceGetVmDriverVersion(VgpuInstance) (string, Return) + VgpuInstanceGetVmID(VgpuInstance) (string, VgpuVmIdType, Return) + VgpuInstanceSetEncoderCapacity(VgpuInstance, int) Return + VgpuTypeGetCapabilities(VgpuTypeId, VgpuCapability) (bool, Return) + VgpuTypeGetClass(VgpuTypeId) (string, Return) + VgpuTypeGetDeviceID(VgpuTypeId) (uint64, uint64, Return) + VgpuTypeGetFrameRateLimit(VgpuTypeId) (uint32, Return) + VgpuTypeGetFramebufferSize(VgpuTypeId) (uint64, Return) + VgpuTypeGetGpuInstanceProfileId(VgpuTypeId) (uint32, Return) + VgpuTypeGetLicense(VgpuTypeId) (string, Return) + VgpuTypeGetMaxInstances(Device, VgpuTypeId) (int, Return) + VgpuTypeGetMaxInstancesPerVm(VgpuTypeId) (int, Return) + VgpuTypeGetName(VgpuTypeId) (string, Return) + VgpuTypeGetNumDisplayHeads(VgpuTypeId) (int, Return) + VgpuTypeGetResolution(VgpuTypeId, int) (uint32, uint32, Return) +} + +// Device represents the interface for the nvmlDevice type. +// +//go:generate moq -out mock/device.go -pkg mock . 
Device:Device +type Device interface { + ClearAccountingPids() Return + ClearCpuAffinity() Return + ClearEccErrorCounts(EccCounterType) Return + ClearFieldValues([]FieldValue) Return + CreateGpuInstance(*GpuInstanceProfileInfo) (GpuInstance, Return) + CreateGpuInstanceWithPlacement(*GpuInstanceProfileInfo, *GpuInstancePlacement) (GpuInstance, Return) + FreezeNvLinkUtilizationCounter(int, int, EnableState) Return + GetAPIRestriction(RestrictedAPI) (EnableState, Return) + GetAccountingBufferSize() (int, Return) + GetAccountingMode() (EnableState, Return) + GetAccountingPids() ([]int, Return) + GetAccountingStats(uint32) (AccountingStats, Return) + GetActiveVgpus() ([]VgpuInstance, Return) + GetAdaptiveClockInfoStatus() (uint32, Return) + GetApplicationsClock(ClockType) (uint32, Return) + GetArchitecture() (DeviceArchitecture, Return) + GetAttributes() (DeviceAttributes, Return) + GetAutoBoostedClocksEnabled() (EnableState, EnableState, Return) + GetBAR1MemoryInfo() (BAR1Memory, Return) + GetBoardId() (uint32, Return) + GetBoardPartNumber() (string, Return) + GetBrand() (BrandType, Return) + GetBridgeChipInfo() (BridgeChipHierarchy, Return) + GetBusType() (BusType, Return) + GetC2cModeInfoV() C2cModeInfoHandler + GetClkMonStatus() (ClkMonStatus, Return) + GetClock(ClockType, ClockId) (uint32, Return) + GetClockInfo(ClockType) (uint32, Return) + GetComputeInstanceId() (int, Return) + GetComputeMode() (ComputeMode, Return) + GetComputeRunningProcesses() ([]ProcessInfo, Return) + GetConfComputeGpuAttestationReport() (ConfComputeGpuAttestationReport, Return) + GetConfComputeGpuCertificate() (ConfComputeGpuCertificate, Return) + GetConfComputeMemSizeInfo() (ConfComputeMemSizeInfo, Return) + GetConfComputeProtectedMemoryUsage() (Memory, Return) + GetCpuAffinity(int) ([]uint, Return) + GetCpuAffinityWithinScope(int, AffinityScope) ([]uint, Return) + GetCreatableVgpus() ([]VgpuTypeId, Return) + GetCudaComputeCapability() (int, int, Return) + GetCurrPcieLinkGeneration() (int, Return) + GetCurrPcieLinkWidth() (int, Return) + GetCurrentClocksEventReasons() (uint64, Return) + GetCurrentClocksThrottleReasons() (uint64, Return) + GetDecoderUtilization() (uint32, uint32, Return) + GetDefaultApplicationsClock(ClockType) (uint32, Return) + GetDefaultEccMode() (EnableState, Return) + GetDetailedEccErrors(MemoryErrorType, EccCounterType) (EccErrorCounts, Return) + GetDeviceHandleFromMigDeviceHandle() (Device, Return) + GetDisplayActive() (EnableState, Return) + GetDisplayMode() (EnableState, Return) + GetDriverModel() (DriverModel, DriverModel, Return) + GetDynamicPstatesInfo() (GpuDynamicPstatesInfo, Return) + GetEccMode() (EnableState, EnableState, Return) + GetEncoderCapacity(EncoderType) (int, Return) + GetEncoderSessions() ([]EncoderSessionInfo, Return) + GetEncoderStats() (int, uint32, uint32, Return) + GetEncoderUtilization() (uint32, uint32, Return) + GetEnforcedPowerLimit() (uint32, Return) + GetFBCSessions() ([]FBCSessionInfo, Return) + GetFBCStats() (FBCStats, Return) + GetFanControlPolicy_v2(int) (FanControlPolicy, Return) + GetFanSpeed() (uint32, Return) + GetFanSpeed_v2(int) (uint32, Return) + GetFieldValues([]FieldValue) Return + GetGpcClkMinMaxVfOffset() (int, int, Return) + GetGpcClkVfOffset() (int, Return) + GetGpuFabricInfo() (GpuFabricInfo, Return) + GetGpuFabricInfoV() GpuFabricInfoHandler + GetGpuInstanceById(int) (GpuInstance, Return) + GetGpuInstanceId() (int, Return) + GetGpuInstancePossiblePlacements(*GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) + 
GetGpuInstanceProfileInfo(int) (GpuInstanceProfileInfo, Return) + GetGpuInstanceProfileInfoV(int) GpuInstanceProfileInfoHandler + GetGpuInstanceRemainingCapacity(*GpuInstanceProfileInfo) (int, Return) + GetGpuInstances(*GpuInstanceProfileInfo) ([]GpuInstance, Return) + GetGpuMaxPcieLinkGeneration() (int, Return) + GetGpuOperationMode() (GpuOperationMode, GpuOperationMode, Return) + GetGraphicsRunningProcesses() ([]ProcessInfo, Return) + GetGridLicensableFeatures() (GridLicensableFeatures, Return) + GetGspFirmwareMode() (bool, bool, Return) + GetGspFirmwareVersion() (string, Return) + GetHostVgpuMode() (HostVgpuMode, Return) + GetIndex() (int, Return) + GetInforomConfigurationChecksum() (uint32, Return) + GetInforomImageVersion() (string, Return) + GetInforomVersion(InforomObject) (string, Return) + GetIrqNum() (int, Return) + GetJpgUtilization() (uint32, uint32, Return) + GetLastBBXFlushTime() (uint64, uint, Return) + GetMPSComputeRunningProcesses() ([]ProcessInfo, Return) + GetMaxClockInfo(ClockType) (uint32, Return) + GetMaxCustomerBoostClock(ClockType) (uint32, Return) + GetMaxMigDeviceCount() (int, Return) + GetMaxPcieLinkGeneration() (int, Return) + GetMaxPcieLinkWidth() (int, Return) + GetMemClkMinMaxVfOffset() (int, int, Return) + GetMemClkVfOffset() (int, Return) + GetMemoryAffinity(int, AffinityScope) ([]uint, Return) + GetMemoryBusWidth() (uint32, Return) + GetMemoryErrorCounter(MemoryErrorType, EccCounterType, MemoryLocation) (uint64, Return) + GetMemoryInfo() (Memory, Return) + GetMemoryInfo_v2() (Memory_v2, Return) + GetMigDeviceHandleByIndex(int) (Device, Return) + GetMigMode() (int, int, Return) + GetMinMaxClockOfPState(ClockType, Pstates) (uint32, uint32, Return) + GetMinMaxFanSpeed() (int, int, Return) + GetMinorNumber() (int, Return) + GetModuleId() (int, Return) + GetMultiGpuBoard() (int, Return) + GetName() (string, Return) + GetNumFans() (int, Return) + GetNumGpuCores() (int, Return) + GetNumaNodeId() (int, Return) + GetNvLinkCapability(int, NvLinkCapability) (uint32, Return) + GetNvLinkErrorCounter(int, NvLinkErrorCounter) (uint64, Return) + GetNvLinkRemoteDeviceType(int) (IntNvLinkDeviceType, Return) + GetNvLinkRemotePciInfo(int) (PciInfo, Return) + GetNvLinkState(int) (EnableState, Return) + GetNvLinkUtilizationControl(int, int) (NvLinkUtilizationControl, Return) + GetNvLinkUtilizationCounter(int, int) (uint64, uint64, Return) + GetNvLinkVersion(int) (uint32, Return) + GetOfaUtilization() (uint32, uint32, Return) + GetP2PStatus(Device, GpuP2PCapsIndex) (GpuP2PStatus, Return) + GetPciInfo() (PciInfo, Return) + GetPciInfoExt() (PciInfoExt, Return) + GetPcieLinkMaxSpeed() (uint32, Return) + GetPcieReplayCounter() (int, Return) + GetPcieSpeed() (int, Return) + GetPcieThroughput(PcieUtilCounter) (uint32, Return) + GetPerformanceState() (Pstates, Return) + GetPersistenceMode() (EnableState, Return) + GetPgpuMetadataString() (string, Return) + GetPowerManagementDefaultLimit() (uint32, Return) + GetPowerManagementLimit() (uint32, Return) + GetPowerManagementLimitConstraints() (uint32, uint32, Return) + GetPowerManagementMode() (EnableState, Return) + GetPowerSource() (PowerSource, Return) + GetPowerState() (Pstates, Return) + GetPowerUsage() (uint32, Return) + GetProcessUtilization(uint64) ([]ProcessUtilizationSample, Return) + GetProcessesUtilizationInfo() (ProcessesUtilizationInfo, Return) + GetRemappedRows() (int, int, bool, bool, Return) + GetRetiredPages(PageRetirementCause) ([]uint64, Return) + GetRetiredPagesPendingStatus() (EnableState, Return) + 
GetRetiredPages_v2(PageRetirementCause) ([]uint64, []uint64, Return) + GetRowRemapperHistogram() (RowRemapperHistogramValues, Return) + GetRunningProcessDetailList() (ProcessDetailList, Return) + GetSamples(SamplingType, uint64) (ValueType, []Sample, Return) + GetSerial() (string, Return) + GetSramEccErrorStatus() (EccSramErrorStatus, Return) + GetSupportedClocksEventReasons() (uint64, Return) + GetSupportedClocksThrottleReasons() (uint64, Return) + GetSupportedEventTypes() (uint64, Return) + GetSupportedGraphicsClocks(int) (int, uint32, Return) + GetSupportedMemoryClocks() (int, uint32, Return) + GetSupportedPerformanceStates() ([]Pstates, Return) + GetSupportedVgpus() ([]VgpuTypeId, Return) + GetTargetFanSpeed(int) (int, Return) + GetTemperature(TemperatureSensors) (uint32, Return) + GetTemperatureThreshold(TemperatureThresholds) (uint32, Return) + GetThermalSettings(uint32) (GpuThermalSettings, Return) + GetTopologyCommonAncestor(Device) (GpuTopologyLevel, Return) + GetTopologyNearestGpus(GpuTopologyLevel) ([]Device, Return) + GetTotalEccErrors(MemoryErrorType, EccCounterType) (uint64, Return) + GetTotalEnergyConsumption() (uint64, Return) + GetUUID() (string, Return) + GetUtilizationRates() (Utilization, Return) + GetVbiosVersion() (string, Return) + GetVgpuCapabilities(DeviceVgpuCapability) (bool, Return) + GetVgpuHeterogeneousMode() (VgpuHeterogeneousMode, Return) + GetVgpuInstancesUtilizationInfo() (VgpuInstancesUtilizationInfo, Return) + GetVgpuMetadata() (VgpuPgpuMetadata, Return) + GetVgpuProcessUtilization(uint64) ([]VgpuProcessUtilizationSample, Return) + GetVgpuProcessesUtilizationInfo() (VgpuProcessesUtilizationInfo, Return) + GetVgpuSchedulerCapabilities() (VgpuSchedulerCapabilities, Return) + GetVgpuSchedulerLog() (VgpuSchedulerLog, Return) + GetVgpuSchedulerState() (VgpuSchedulerGetState, Return) + GetVgpuTypeCreatablePlacements(VgpuTypeId) (VgpuPlacementList, Return) + GetVgpuTypeSupportedPlacements(VgpuTypeId) (VgpuPlacementList, Return) + GetVgpuUtilization(uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) + GetViolationStatus(PerfPolicyType) (ViolationTime, Return) + GetVirtualizationMode() (GpuVirtualizationMode, Return) + GpmMigSampleGet(int, GpmSample) Return + GpmQueryDeviceSupport() (GpmSupport, Return) + GpmQueryDeviceSupportV() GpmSupportV + GpmQueryIfStreamingEnabled() (uint32, Return) + GpmSampleGet(GpmSample) Return + GpmSetStreamingEnabled(uint32) Return + IsMigDeviceHandle() (bool, Return) + OnSameBoard(Device) (int, Return) + RegisterEvents(uint64, EventSet) Return + ResetApplicationsClocks() Return + ResetGpuLockedClocks() Return + ResetMemoryLockedClocks() Return + ResetNvLinkErrorCounters(int) Return + ResetNvLinkUtilizationCounter(int, int) Return + SetAPIRestriction(RestrictedAPI, EnableState) Return + SetAccountingMode(EnableState) Return + SetApplicationsClocks(uint32, uint32) Return + SetAutoBoostedClocksEnabled(EnableState) Return + SetComputeMode(ComputeMode) Return + SetConfComputeUnprotectedMemSize(uint64) Return + SetCpuAffinity() Return + SetDefaultAutoBoostedClocksEnabled(EnableState, uint32) Return + SetDefaultFanSpeed_v2(int) Return + SetDriverModel(DriverModel, uint32) Return + SetEccMode(EnableState) Return + SetFanControlPolicy(int, FanControlPolicy) Return + SetFanSpeed_v2(int, int) Return + SetGpcClkVfOffset(int) Return + SetGpuLockedClocks(uint32, uint32) Return + SetGpuOperationMode(GpuOperationMode) Return + SetMemClkVfOffset(int) Return + SetMemoryLockedClocks(uint32, uint32) Return + SetMigMode(int) (Return, Return) + 
SetNvLinkDeviceLowPowerThreshold(*NvLinkPowerThres) Return + SetNvLinkUtilizationControl(int, int, *NvLinkUtilizationControl, bool) Return + SetPersistenceMode(EnableState) Return + SetPowerManagementLimit(uint32) Return + SetPowerManagementLimit_v2(*PowerValue_v2) Return + SetTemperatureThreshold(TemperatureThresholds, int) Return + SetVgpuCapabilities(DeviceVgpuCapability, EnableState) Return + SetVgpuHeterogeneousMode(VgpuHeterogeneousMode) Return + SetVgpuSchedulerState(*VgpuSchedulerSetState) Return + SetVirtualizationMode(GpuVirtualizationMode) Return + ValidateInforom() Return + VgpuTypeGetMaxInstances(VgpuTypeId) (int, Return) +} + +// GpuInstance represents the interface for the nvmlGpuInstance type. +// +//go:generate moq -out mock/gpuinstance.go -pkg mock . GpuInstance:GpuInstance +type GpuInstance interface { + CreateComputeInstance(*ComputeInstanceProfileInfo) (ComputeInstance, Return) + CreateComputeInstanceWithPlacement(*ComputeInstanceProfileInfo, *ComputeInstancePlacement) (ComputeInstance, Return) + Destroy() Return + GetComputeInstanceById(int) (ComputeInstance, Return) + GetComputeInstancePossiblePlacements(*ComputeInstanceProfileInfo) ([]ComputeInstancePlacement, Return) + GetComputeInstanceProfileInfo(int, int) (ComputeInstanceProfileInfo, Return) + GetComputeInstanceProfileInfoV(int, int) ComputeInstanceProfileInfoHandler + GetComputeInstanceRemainingCapacity(*ComputeInstanceProfileInfo) (int, Return) + GetComputeInstances(*ComputeInstanceProfileInfo) ([]ComputeInstance, Return) + GetInfo() (GpuInstanceInfo, Return) +} + +// ComputeInstance represents the interface for the nvmlComputeInstance type. +// +//go:generate moq -out mock/computeinstance.go -pkg mock . ComputeInstance:ComputeInstance +type ComputeInstance interface { + Destroy() Return + GetInfo() (ComputeInstanceInfo, Return) +} + +// EventSet represents the interface for the nvmlEventSet type. +// +//go:generate moq -out mock/eventset.go -pkg mock . EventSet:EventSet +type EventSet interface { + Free() Return + Wait(uint32) (EventData, Return) +} + +// GpmSample represents the interface for the nvmlGpmSample type. +// +//go:generate moq -out mock/gpmsample.go -pkg mock . GpmSample:GpmSample +type GpmSample interface { + Free() Return + Get(Device) Return + MigGet(Device, int) Return +} + +// Unit represents the interface for the nvmlUnit type. +// +//go:generate moq -out mock/unit.go -pkg mock . Unit:Unit +type Unit interface { + GetDevices() ([]Device, Return) + GetFanSpeedInfo() (UnitFanSpeeds, Return) + GetLedState() (LedState, Return) + GetPsuInfo() (PSUInfo, Return) + GetTemperature(int) (uint32, Return) + GetUnitInfo() (UnitInfo, Return) + SetLedState(LedColor) Return +} + +// VgpuInstance represents the interface for the nvmlVgpuInstance type. +// +//go:generate moq -out mock/vgpuinstance.go -pkg mock . 
VgpuInstance:VgpuInstance +type VgpuInstance interface { + ClearAccountingPids() Return + GetAccountingMode() (EnableState, Return) + GetAccountingPids() ([]int, Return) + GetAccountingStats(int) (AccountingStats, Return) + GetEccMode() (EnableState, Return) + GetEncoderCapacity() (int, Return) + GetEncoderSessions() (int, EncoderSessionInfo, Return) + GetEncoderStats() (int, uint32, uint32, Return) + GetFBCSessions() (int, FBCSessionInfo, Return) + GetFBCStats() (FBCStats, Return) + GetFbUsage() (uint64, Return) + GetFrameRateLimit() (uint32, Return) + GetGpuInstanceId() (int, Return) + GetGpuPciId() (string, Return) + GetLicenseInfo() (VgpuLicenseInfo, Return) + GetLicenseStatus() (int, Return) + GetMdevUUID() (string, Return) + GetMetadata() (VgpuMetadata, Return) + GetType() (VgpuTypeId, Return) + GetUUID() (string, Return) + GetVmDriverVersion() (string, Return) + GetVmID() (string, VgpuVmIdType, Return) + SetEncoderCapacity(int) Return +} + +// VgpuTypeId represents the interface for the nvmlVgpuTypeId type. +// +//go:generate moq -out mock/vgputypeid.go -pkg mock . VgpuTypeId:VgpuTypeId +type VgpuTypeId interface { + GetCapabilities(VgpuCapability) (bool, Return) + GetClass() (string, Return) + GetCreatablePlacements(Device) (VgpuPlacementList, Return) + GetDeviceID() (uint64, uint64, Return) + GetFrameRateLimit() (uint32, Return) + GetFramebufferSize() (uint64, Return) + GetGpuInstanceProfileId() (uint32, Return) + GetLicense() (string, Return) + GetMaxInstances(Device) (int, Return) + GetMaxInstancesPerVm() (int, Return) + GetName() (string, Return) + GetNumDisplayHeads() (int, Return) + GetResolution(int) (uint32, uint32, Return) + GetSupportedPlacements(Device) (VgpuPlacementList, Return) +} diff --git a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/LICENSE b/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/LICENSE deleted file mode 100644 index 2a718d63da7..00000000000 --- a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2018, NVIDIA Corporation -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
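For orientation, here is a minimal sketch of how the interface-based go-nvml API added above is typically consumed. It is illustrative only (not code from this patch) and assumes the vendored import path github.com/NVIDIA/go-nvml/pkg/nvml; every call yields an nvml.Return code rather than a Go error.

package main

import (
	"fmt"
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	// Initialize the library; results are nvml.Return codes, not Go errors.
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("nvml: init failed: %s", nvml.ErrorString(ret))
	}
	defer nvml.Shutdown()

	count, ret := nvml.DeviceGetCount()
	if ret != nvml.SUCCESS {
		log.Fatalf("nvml: device count failed: %s", nvml.ErrorString(ret))
	}

	for i := 0; i < count; i++ {
		// The returned handle satisfies the Device interface defined above.
		dev, ret := nvml.DeviceGetHandleByIndex(i)
		if ret != nvml.SUCCESS {
			log.Fatalf("nvml: device %d: %s", i, nvml.ErrorString(ret))
		}
		uuid, ret := dev.GetUUID()
		if ret != nvml.SUCCESS {
			log.Fatalf("nvml: uuid of device %d: %s", i, nvml.ErrorString(ret))
		}
		fmt.Printf("GPU %d: %s\n", i, uuid)
	}
}

Because the handle is exposed through an interface, callers can substitute generated mocks in unit tests instead of linking against libnvidia-ml.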
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/bindings.go b/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/bindings.go deleted file mode 100644 index 4bba898342f..00000000000 --- a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/bindings.go +++ /dev/null @@ -1,634 +0,0 @@ -// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. - -package nvml - -// #cgo LDFLAGS: -ldl -Wl,--unresolved-symbols=ignore-in-object-files -// #include "nvml_dl.h" -import "C" - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "sort" - "strconv" - "strings" -) - -const ( - szDriver = C.NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE - szName = C.NVML_DEVICE_NAME_BUFFER_SIZE - szUUID = C.NVML_DEVICE_UUID_BUFFER_SIZE - szProcs = 32 - szProcName = 64 - - XidCriticalError = C.nvmlEventTypeXidCriticalError -) - -type handle struct{ dev C.nvmlDevice_t } -type EventSet struct{ set C.nvmlEventSet_t } -type Event struct { - UUID *string - Etype uint64 - Edata uint64 -} - -func uintPtr(c C.uint) *uint { - i := uint(c) - return &i -} - -func uint64Ptr(c C.ulonglong) *uint64 { - i := uint64(c) - return &i -} - -func stringPtr(c *C.char) *string { - s := C.GoString(c) - return &s -} - -func errorString(ret C.nvmlReturn_t) error { - if ret == C.NVML_SUCCESS { - return nil - } - err := C.GoString(C.nvmlErrorString(ret)) - return fmt.Errorf("nvml: %v", err) -} - -func init_() error { - r := C.nvmlInit_dl() - if r == C.NVML_ERROR_LIBRARY_NOT_FOUND { - return errors.New("could not load NVML library") - } - return errorString(r) -} - -func NewEventSet() EventSet { - var set C.nvmlEventSet_t - C.nvmlEventSetCreate(&set) - - return EventSet{set} -} - -func RegisterEvent(es EventSet, event int) error { - n, err := deviceGetCount() - if err != nil { - return err - } - - var i uint - for i = 0; i < n; i++ { - h, err := deviceGetHandleByIndex(i) - if err != nil { - return err - } - - r := C.nvmlDeviceRegisterEvents(h.dev, C.ulonglong(event), es.set) - if r != C.NVML_SUCCESS { - return errorString(r) - } - } - - return nil -} - -func RegisterEventForDevice(es EventSet, event int, uuid string) error { - n, err := deviceGetCount() - if err != nil { - return err - } - - var i uint - for i = 0; i < n; i++ { - h, err := deviceGetHandleByIndex(i) - if err != nil { - return err - } - - duuid, err := h.deviceGetUUID() - if err != nil { - return err - } - - if *duuid != uuid { - continue - } - - r := C.nvmlDeviceRegisterEvents(h.dev, C.ulonglong(event), es.set) - if r != C.NVML_SUCCESS { - return errorString(r) - } - - return nil - } - - return fmt.Errorf("nvml: device not found") -} - -func DeleteEventSet(es EventSet) { - C.nvmlEventSetFree(es.set) -} - -func WaitForEvent(es EventSet, timeout uint) (Event, error) { - var data C.nvmlEventData_t - - r := C.nvmlEventSetWait(es.set, &data, C.uint(timeout)) - uuid, _ := handle{data.device}.deviceGetUUID() - - return Event{ - UUID: uuid, - Etype: uint64(data.eventType), - Edata: 
uint64(data.eventData), - }, - errorString(r) -} - -func shutdown() error { - return errorString(C.nvmlShutdown_dl()) -} - -func systemGetDriverVersion() (string, error) { - var driver [szDriver]C.char - - r := C.nvmlSystemGetDriverVersion(&driver[0], szDriver) - return C.GoString(&driver[0]), errorString(r) -} - -func systemGetProcessName(pid uint) (string, error) { - var proc [szProcName]C.char - - r := C.nvmlSystemGetProcessName(C.uint(pid), &proc[0], szProcName) - return C.GoString(&proc[0]), errorString(r) -} - -func deviceGetCount() (uint, error) { - var n C.uint - - r := C.nvmlDeviceGetCount(&n) - return uint(n), errorString(r) -} - -func deviceGetHandleByIndex(idx uint) (handle, error) { - var dev C.nvmlDevice_t - - r := C.nvmlDeviceGetHandleByIndex(C.uint(idx), &dev) - return handle{dev}, errorString(r) -} - -func deviceGetTopologyCommonAncestor(h1, h2 handle) (*uint, error) { - var level C.nvmlGpuTopologyLevel_t - - r := C.nvmlDeviceGetTopologyCommonAncestor_dl(h1.dev, h2.dev, &level) - if r == C.NVML_ERROR_FUNCTION_NOT_FOUND || r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(C.uint(level)), errorString(r) -} - -func (h handle) deviceGetName() (*string, error) { - var name [szName]C.char - - r := C.nvmlDeviceGetName(h.dev, &name[0], szName) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return stringPtr(&name[0]), errorString(r) -} - -func (h handle) deviceGetUUID() (*string, error) { - var uuid [szUUID]C.char - - r := C.nvmlDeviceGetUUID(h.dev, &uuid[0], szUUID) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return stringPtr(&uuid[0]), errorString(r) -} - -func (h handle) deviceGetPciInfo() (*string, error) { - var pci C.nvmlPciInfo_t - - r := C.nvmlDeviceGetPciInfo(h.dev, &pci) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return stringPtr(&pci.busId[0]), errorString(r) -} - -func (h handle) deviceGetMinorNumber() (*uint, error) { - var minor C.uint - - r := C.nvmlDeviceGetMinorNumber(h.dev, &minor) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(minor), errorString(r) -} - -func (h handle) deviceGetBAR1MemoryInfo() (*uint64, *uint64, error) { - var bar1 C.nvmlBAR1Memory_t - - r := C.nvmlDeviceGetBAR1MemoryInfo(h.dev, &bar1) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - return uint64Ptr(bar1.bar1Total), uint64Ptr(bar1.bar1Used), errorString(r) -} - -func (h handle) deviceGetPowerManagementLimit() (*uint, error) { - var power C.uint - - r := C.nvmlDeviceGetPowerManagementLimit(h.dev, &power) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(power), errorString(r) -} - -func (h handle) deviceGetMaxClockInfo() (*uint, *uint, error) { - var sm, mem C.uint - - r := C.nvmlDeviceGetMaxClockInfo(h.dev, C.NVML_CLOCK_SM, &sm) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetMaxClockInfo(h.dev, C.NVML_CLOCK_MEM, &mem) - } - return uintPtr(sm), uintPtr(mem), errorString(r) -} - -func (h handle) deviceGetMaxPcieLinkGeneration() (*uint, error) { - var link C.uint - - r := C.nvmlDeviceGetMaxPcieLinkGeneration(h.dev, &link) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(link), errorString(r) -} - -func (h handle) deviceGetMaxPcieLinkWidth() (*uint, error) { - var width C.uint - - r := C.nvmlDeviceGetMaxPcieLinkWidth(h.dev, &width) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(width), errorString(r) -} - -func (h 
handle) deviceGetPowerUsage() (*uint, error) { - var power C.uint - - r := C.nvmlDeviceGetPowerUsage(h.dev, &power) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(power), errorString(r) -} - -func (h handle) deviceGetTemperature() (*uint, error) { - var temp C.uint - - r := C.nvmlDeviceGetTemperature(h.dev, C.NVML_TEMPERATURE_GPU, &temp) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(temp), errorString(r) -} - -func (h handle) deviceGetUtilizationRates() (*uint, *uint, error) { - var usage C.nvmlUtilization_t - - r := C.nvmlDeviceGetUtilizationRates(h.dev, &usage) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - return uintPtr(usage.gpu), uintPtr(usage.memory), errorString(r) -} - -func (h handle) deviceGetEncoderUtilization() (*uint, error) { - var usage, sampling C.uint - - r := C.nvmlDeviceGetEncoderUtilization(h.dev, &usage, &sampling) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(usage), errorString(r) -} - -func (h handle) deviceGetDecoderUtilization() (*uint, error) { - var usage, sampling C.uint - - r := C.nvmlDeviceGetDecoderUtilization(h.dev, &usage, &sampling) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(usage), errorString(r) -} - -func (h handle) deviceGetMemoryInfo() (totalMem *uint64, devMem DeviceMemory, err error) { - var mem C.nvmlMemory_t - - r := C.nvmlDeviceGetMemoryInfo(h.dev, &mem) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - - err = errorString(r) - if r != C.NVML_SUCCESS { - return - } - - totalMem = uint64Ptr(mem.total) - if totalMem != nil { - *totalMem /= 1024 * 1024 // MiB - } - - devMem = DeviceMemory{ - Used: uint64Ptr(mem.used), - Free: uint64Ptr(mem.free), - } - - if devMem.Used != nil { - *devMem.Used /= 1024 * 1024 // MiB - } - - if devMem.Free != nil { - *devMem.Free /= 1024 * 1024 // MiB - } - return -} - -func (h handle) deviceGetClockInfo() (*uint, *uint, error) { - var sm, mem C.uint - - r := C.nvmlDeviceGetClockInfo(h.dev, C.NVML_CLOCK_SM, &sm) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetClockInfo(h.dev, C.NVML_CLOCK_MEM, &mem) - } - return uintPtr(sm), uintPtr(mem), errorString(r) -} - -func (h handle) deviceGetMemoryErrorCounter() (*uint64, *uint64, *uint64, error) { - var l1, l2, mem C.ulonglong - - r := C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED, - C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_L1_CACHE, &l1) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil, nil - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED, - C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_L2_CACHE, &l2) - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED, - C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_DEVICE_MEMORY, &mem) - } - return uint64Ptr(l1), uint64Ptr(l2), uint64Ptr(mem), errorString(r) -} - -func (h handle) deviceGetPcieThroughput() (*uint, *uint, error) { - var rx, tx C.uint - - r := C.nvmlDeviceGetPcieThroughput(h.dev, C.NVML_PCIE_UTIL_RX_BYTES, &rx) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetPcieThroughput(h.dev, C.NVML_PCIE_UTIL_TX_BYTES, &tx) - } - return uintPtr(rx), uintPtr(tx), errorString(r) -} - -func (h handle) deviceGetComputeRunningProcesses() ([]uint, []uint64, error) { - var procs 
[szProcs]C.nvmlProcessInfo_t - var count = C.uint(szProcs) - - r := C.nvmlDeviceGetComputeRunningProcesses(h.dev, &count, &procs[0]) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - n := int(count) - pids := make([]uint, n) - mems := make([]uint64, n) - for i := 0; i < n; i++ { - pids[i] = uint(procs[i].pid) - mems[i] = uint64(procs[i].usedGpuMemory) - } - return pids, mems, errorString(r) -} - -func (h handle) deviceGetGraphicsRunningProcesses() ([]uint, []uint64, error) { - var procs [szProcs]C.nvmlProcessInfo_t - var count = C.uint(szProcs) - - r := C.nvmlDeviceGetGraphicsRunningProcesses(h.dev, &count, &procs[0]) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - n := int(count) - pids := make([]uint, n) - mems := make([]uint64, n) - for i := 0; i < n; i++ { - pids[i] = uint(procs[i].pid) - mems[i] = uint64(procs[i].usedGpuMemory) - } - return pids, mems, errorString(r) -} - -func (h handle) deviceGetAllRunningProcesses() ([]ProcessInfo, error) { - cPids, cpMems, err := h.deviceGetComputeRunningProcesses() - if err != nil { - return nil, err - } - - gPids, gpMems, err := h.deviceGetGraphicsRunningProcesses() - if err != nil { - return nil, err - } - - allPids := make(map[uint]ProcessInfo) - - for i, pid := range cPids { - name, err := processName(pid) - if err != nil { - return nil, err - } - allPids[pid] = ProcessInfo{ - PID: pid, - Name: name, - MemoryUsed: cpMems[i] / (1024 * 1024), // MiB - Type: Compute, - } - - } - - for i, pid := range gPids { - pInfo, exists := allPids[pid] - if exists { - pInfo.Type = ComputeAndGraphics - allPids[pid] = pInfo - } else { - name, err := processName(pid) - if err != nil { - return nil, err - } - allPids[pid] = ProcessInfo{ - PID: pid, - Name: name, - MemoryUsed: gpMems[i] / (1024 * 1024), // MiB - Type: Graphics, - } - } - } - - var processInfo []ProcessInfo - for _, v := range allPids { - processInfo = append(processInfo, v) - } - sort.Slice(processInfo, func(i, j int) bool { - return processInfo[i].PID < processInfo[j].PID - }) - - return processInfo, nil -} - -func (h handle) getClocksThrottleReasons() (reason ThrottleReason, err error) { - var clocksThrottleReasons C.ulonglong - - r := C.nvmlDeviceGetCurrentClocksThrottleReasons(h.dev, &clocksThrottleReasons) - - if r == C.NVML_ERROR_NOT_SUPPORTED { - return ThrottleReasonUnknown, nil - } - - if r != C.NVML_SUCCESS { - return ThrottleReasonUnknown, errorString(r) - } - - switch clocksThrottleReasons { - case C.nvmlClocksThrottleReasonGpuIdle: - reason = ThrottleReasonGpuIdle - case C.nvmlClocksThrottleReasonApplicationsClocksSetting: - reason = ThrottleReasonApplicationsClocksSetting - case C.nvmlClocksThrottleReasonSwPowerCap: - reason = ThrottleReasonSwPowerCap - case C.nvmlClocksThrottleReasonHwSlowdown: - reason = ThrottleReasonHwSlowdown - case C.nvmlClocksThrottleReasonSyncBoost: - reason = ThrottleReasonSyncBoost - case C.nvmlClocksThrottleReasonSwThermalSlowdown: - reason = ThrottleReasonSwThermalSlowdown - case C.nvmlClocksThrottleReasonHwThermalSlowdown: - reason = ThrottleReasonHwThermalSlowdown - case C.nvmlClocksThrottleReasonHwPowerBrakeSlowdown: - reason = ThrottleReasonHwPowerBrakeSlowdown - case C.nvmlClocksThrottleReasonDisplayClockSetting: - reason = ThrottleReasonDisplayClockSetting - case C.nvmlClocksThrottleReasonNone: - reason = ThrottleReasonNone - } - return -} - -func (h handle) getPerformanceState() (PerfState, error) { - var pstate C.nvmlPstates_t - - r := C.nvmlDeviceGetPerformanceState(h.dev, &pstate) - - if r == 
C.NVML_ERROR_NOT_SUPPORTED { - return PerfStateUnknown, nil - } - - if r != C.NVML_SUCCESS { - return PerfStateUnknown, errorString(r) - } - return PerfState(pstate), nil -} - -func processName(pid uint) (string, error) { - f := `/proc/` + strconv.FormatUint(uint64(pid), 10) + `/comm` - d, err := ioutil.ReadFile(f) - - if err != nil { - // TOCTOU: process terminated - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - return strings.TrimSuffix(string(d), "\n"), err -} - -func (h handle) getAccountingInfo() (accountingInfo Accounting, err error) { - var mode C.nvmlEnableState_t - var buffer C.uint - - r := C.nvmlDeviceGetAccountingMode(h.dev, &mode) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - - if r != C.NVML_SUCCESS { - return accountingInfo, errorString(r) - } - - r = C.nvmlDeviceGetAccountingBufferSize(h.dev, &buffer) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - - if r != C.NVML_SUCCESS { - return accountingInfo, errorString(r) - } - - accountingInfo = Accounting{ - Mode: ModeState(mode), - BufferSize: uintPtr(buffer), - } - return -} - -func (h handle) getDisplayInfo() (display Display, err error) { - var mode, isActive C.nvmlEnableState_t - - r := C.nvmlDeviceGetDisplayActive(h.dev, &mode) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - - if r != C.NVML_SUCCESS { - return display, errorString(r) - } - - r = C.nvmlDeviceGetDisplayMode(h.dev, &isActive) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - if r != C.NVML_SUCCESS { - return display, errorString(r) - } - display = Display{ - Mode: ModeState(mode), - Active: ModeState(isActive), - } - return -} - -func (h handle) getPeristenceMode() (state ModeState, err error) { - var mode C.nvmlEnableState_t - - r := C.nvmlDeviceGetPersistenceMode(h.dev, &mode) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - return ModeState(mode), errorString(r) -} diff --git a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.go b/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.go deleted file mode 100644 index f6ec9e8fae3..00000000000 --- a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 
- -package nvml - -// #include "nvml_dl.h" -import "C" - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "strconv" - "strings" -) - -var ( - ErrCPUAffinity = errors.New("failed to retrieve CPU affinity") - ErrUnsupportedP2PLink = errors.New("unsupported P2P link type") - ErrUnsupportedGPU = errors.New("unsupported GPU device") -) - -type ModeState uint - -const ( - Enabled ModeState = iota - Disabled -) - -func (m ModeState) String() string { - switch m { - case Enabled: - return "Enabled" - case Disabled: - return "Disabled" - } - return "N/A" -} - -type Display struct { - Mode ModeState - Active ModeState -} - -type Accounting struct { - Mode ModeState - BufferSize *uint -} - -type DeviceMode struct { - DisplayInfo Display - Persistence ModeState - AccountingInfo Accounting -} - -type ThrottleReason uint - -const ( - ThrottleReasonGpuIdle ThrottleReason = iota - ThrottleReasonApplicationsClocksSetting - ThrottleReasonSwPowerCap - ThrottleReasonHwSlowdown - ThrottleReasonSyncBoost - ThrottleReasonSwThermalSlowdown - ThrottleReasonHwThermalSlowdown - ThrottleReasonHwPowerBrakeSlowdown - ThrottleReasonDisplayClockSetting - ThrottleReasonNone - ThrottleReasonUnknown -) - -func (r ThrottleReason) String() string { - switch r { - case ThrottleReasonGpuIdle: - return "Gpu Idle" - case ThrottleReasonApplicationsClocksSetting: - return "Applications Clocks Setting" - case ThrottleReasonSwPowerCap: - return "SW Power Cap" - case ThrottleReasonHwSlowdown: - return "HW Slowdown" - case ThrottleReasonSyncBoost: - return "Sync Boost" - case ThrottleReasonSwThermalSlowdown: - return "SW Thermal Slowdown" - case ThrottleReasonHwThermalSlowdown: - return "HW Thermal Slowdown" - case ThrottleReasonHwPowerBrakeSlowdown: - return "HW Power Brake Slowdown" - case ThrottleReasonDisplayClockSetting: - return "Display Clock Setting" - case ThrottleReasonNone: - return "No clocks throttling" - } - return "N/A" -} - -type PerfState uint - -const ( - PerfStateMax = 0 - PerfStateMin = 15 - PerfStateUnknown = 32 -) - -func (p PerfState) String() string { - if p >= PerfStateMax && p <= PerfStateMin { - return fmt.Sprintf("P%d", p) - } - return "Unknown" -} - -type ProcessType uint - -const ( - Compute ProcessType = iota - Graphics - ComputeAndGraphics -) - -func (t ProcessType) String() string { - typ := "C+G" - if t == Compute { - typ = "C" - } else if t == Graphics { - typ = "G" - } - return typ -} - -type P2PLinkType uint - -const ( - P2PLinkUnknown P2PLinkType = iota - P2PLinkCrossCPU - P2PLinkSameCPU - P2PLinkHostBridge - P2PLinkMultiSwitch - P2PLinkSingleSwitch - P2PLinkSameBoard -) - -type P2PLink struct { - BusID string - Link P2PLinkType -} - -func (t P2PLinkType) String() string { - switch t { - case P2PLinkCrossCPU: - return "Cross CPU socket" - case P2PLinkSameCPU: - return "Same CPU socket" - case P2PLinkHostBridge: - return "Host PCI bridge" - case P2PLinkMultiSwitch: - return "Multiple PCI switches" - case P2PLinkSingleSwitch: - return "Single PCI switch" - case P2PLinkSameBoard: - return "Same board" - case P2PLinkUnknown: - } - return "N/A" -} - -type ClockInfo struct { - Cores *uint - Memory *uint -} - -type PCIInfo struct { - BusID string - BAR1 *uint64 - Bandwidth *uint -} - -type Device struct { - handle - - UUID string - Path string - Model *string - Power *uint - Memory *uint64 - CPUAffinity *uint - PCI PCIInfo - Clocks ClockInfo - Topology []P2PLink -} - -type UtilizationInfo struct { - GPU *uint - Memory *uint - Encoder *uint - Decoder *uint -} - -type PCIThroughputInfo struct { - RX 
*uint - TX *uint -} - -type PCIStatusInfo struct { - BAR1Used *uint64 - Throughput PCIThroughputInfo -} - -type ECCErrorsInfo struct { - L1Cache *uint64 - L2Cache *uint64 - Device *uint64 -} - -type DeviceMemory struct { - Used *uint64 - Free *uint64 -} - -type MemoryInfo struct { - Global DeviceMemory - ECCErrors ECCErrorsInfo -} - -type ProcessInfo struct { - PID uint - Name string - MemoryUsed uint64 - Type ProcessType -} - -type DeviceStatus struct { - Power *uint - Temperature *uint - Utilization UtilizationInfo - Memory MemoryInfo - Clocks ClockInfo - PCI PCIStatusInfo - Processes []ProcessInfo - Throttle ThrottleReason - Performance PerfState -} - -func assert(err error) { - if err != nil { - panic(err) - } -} - -func Init() error { - return init_() -} - -func Shutdown() error { - return shutdown() -} - -func GetDeviceCount() (uint, error) { - return deviceGetCount() -} - -func GetDriverVersion() (string, error) { - return systemGetDriverVersion() -} - -func numaNode(busid string) (uint, error) { - // discard leading zeros of busid - b, err := ioutil.ReadFile(fmt.Sprintf("/sys/bus/pci/devices/%s/numa_node", strings.ToLower(busid[4:]))) - if err != nil { - // XXX report node 0 if NUMA support isn't enabled - return 0, nil - } - node, err := strconv.ParseInt(string(bytes.TrimSpace(b)), 10, 8) - if err != nil { - return 0, fmt.Errorf("%v: %v", ErrCPUAffinity, err) - } - if node < 0 { - node = 0 // XXX report node 0 instead of NUMA_NO_NODE - } - return uint(node), nil -} - -func pciBandwidth(gen, width *uint) *uint { - m := map[uint]uint{ - 1: 250, // MB/s - 2: 500, - 3: 985, - 4: 1969, - } - if gen == nil || width == nil { - return nil - } - bw := m[*gen] * *width - return &bw -} - -func NewDevice(idx uint) (device *Device, err error) { - defer func() { - if r := recover(); r != nil { - err = r.(error) - } - }() - - h, err := deviceGetHandleByIndex(idx) - assert(err) - model, err := h.deviceGetName() - assert(err) - uuid, err := h.deviceGetUUID() - assert(err) - minor, err := h.deviceGetMinorNumber() - assert(err) - power, err := h.deviceGetPowerManagementLimit() - assert(err) - totalMem, _, err := h.deviceGetMemoryInfo() - assert(err) - busid, err := h.deviceGetPciInfo() - assert(err) - bar1, _, err := h.deviceGetBAR1MemoryInfo() - assert(err) - pcig, err := h.deviceGetMaxPcieLinkGeneration() - assert(err) - pciw, err := h.deviceGetMaxPcieLinkWidth() - assert(err) - ccore, cmem, err := h.deviceGetMaxClockInfo() - assert(err) - - if minor == nil || busid == nil || uuid == nil { - return nil, ErrUnsupportedGPU - } - path := fmt.Sprintf("/dev/nvidia%d", *minor) - node, err := numaNode(*busid) - assert(err) - - device = &Device{ - handle: h, - UUID: *uuid, - Path: path, - Model: model, - Power: power, - Memory: totalMem, - CPUAffinity: &node, - PCI: PCIInfo{ - BusID: *busid, - BAR1: bar1, - Bandwidth: pciBandwidth(pcig, pciw), // MB/s - }, - Clocks: ClockInfo{ - Cores: ccore, // MHz - Memory: cmem, // MHz - }, - } - if power != nil { - *device.Power /= 1000 // W - } - if bar1 != nil { - *device.PCI.BAR1 /= 1024 * 1024 // MiB - } - return -} - -func NewDeviceLite(idx uint) (device *Device, err error) { - defer func() { - if r := recover(); r != nil { - err = r.(error) - } - }() - - h, err := deviceGetHandleByIndex(idx) - assert(err) - uuid, err := h.deviceGetUUID() - assert(err) - minor, err := h.deviceGetMinorNumber() - assert(err) - busid, err := h.deviceGetPciInfo() - assert(err) - - if minor == nil || busid == nil || uuid == nil { - return nil, ErrUnsupportedGPU - } - path := 
fmt.Sprintf("/dev/nvidia%d", *minor) - - device = &Device{ - handle: h, - UUID: *uuid, - Path: path, - PCI: PCIInfo{ - BusID: *busid, - }, - } - return -} - -func (d *Device) Status() (status *DeviceStatus, err error) { - defer func() { - if r := recover(); r != nil { - err = r.(error) - } - }() - - power, err := d.deviceGetPowerUsage() - assert(err) - temp, err := d.deviceGetTemperature() - assert(err) - ugpu, umem, err := d.deviceGetUtilizationRates() - assert(err) - uenc, err := d.deviceGetEncoderUtilization() - assert(err) - udec, err := d.deviceGetDecoderUtilization() - assert(err) - _, devMem, err := d.deviceGetMemoryInfo() - assert(err) - ccore, cmem, err := d.deviceGetClockInfo() - assert(err) - _, bar1, err := d.deviceGetBAR1MemoryInfo() - assert(err) - el1, el2, emem, err := d.deviceGetMemoryErrorCounter() - assert(err) - pcirx, pcitx, err := d.deviceGetPcieThroughput() - assert(err) - throttle, err := d.getClocksThrottleReasons() - assert(err) - perfState, err := d.getPerformanceState() - assert(err) - processInfo, err := d.deviceGetAllRunningProcesses() - assert(err) - - status = &DeviceStatus{ - Power: power, - Temperature: temp, // °C - Utilization: UtilizationInfo{ - GPU: ugpu, // % - Memory: umem, // % - Encoder: uenc, // % - Decoder: udec, // % - }, - Memory: MemoryInfo{ - Global: devMem, - ECCErrors: ECCErrorsInfo{ - L1Cache: el1, - L2Cache: el2, - Device: emem, - }, - }, - Clocks: ClockInfo{ - Cores: ccore, // MHz - Memory: cmem, // MHz - }, - PCI: PCIStatusInfo{ - BAR1Used: bar1, - Throughput: PCIThroughputInfo{ - RX: pcirx, - TX: pcitx, - }, - }, - Throttle: throttle, - Performance: perfState, - Processes: processInfo, - } - if power != nil { - *status.Power /= 1000 // W - } - if bar1 != nil { - *status.PCI.BAR1Used /= 1024 * 1024 // MiB - } - if pcirx != nil { - *status.PCI.Throughput.RX /= 1000 // MB/s - } - if pcitx != nil { - *status.PCI.Throughput.TX /= 1000 // MB/s - } - return -} - -func GetP2PLink(dev1, dev2 *Device) (link P2PLinkType, err error) { - level, err := deviceGetTopologyCommonAncestor(dev1.handle, dev2.handle) - if err != nil || level == nil { - return P2PLinkUnknown, err - } - - switch *level { - case C.NVML_TOPOLOGY_INTERNAL: - link = P2PLinkSameBoard - case C.NVML_TOPOLOGY_SINGLE: - link = P2PLinkSingleSwitch - case C.NVML_TOPOLOGY_MULTIPLE: - link = P2PLinkMultiSwitch - case C.NVML_TOPOLOGY_HOSTBRIDGE: - link = P2PLinkHostBridge - case C.NVML_TOPOLOGY_CPU: - link = P2PLinkSameCPU - case C.NVML_TOPOLOGY_SYSTEM: - link = P2PLinkCrossCPU - default: - err = ErrUnsupportedP2PLink - } - return -} - -func (d *Device) GetComputeRunningProcesses() ([]uint, []uint64, error) { - return d.handle.deviceGetComputeRunningProcesses() -} - -func (d *Device) GetGraphicsRunningProcesses() ([]uint, []uint64, error) { - return d.handle.deviceGetGraphicsRunningProcesses() -} - -func (d *Device) GetAllRunningProcesses() ([]ProcessInfo, error) { - return d.handle.deviceGetAllRunningProcesses() -} - -func (d *Device) GetDeviceMode() (mode *DeviceMode, err error) { - defer func() { - if r := recover(); r != nil { - err = r.(error) - } - }() - - display, err := d.getDisplayInfo() - assert(err) - - p, err := d.getPeristenceMode() - assert(err) - - accounting, err := d.getAccountingInfo() - assert(err) - - mode = &DeviceMode{ - DisplayInfo: display, - Persistence: p, - AccountingInfo: accounting, - } - return -} diff --git a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.h 
b/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.h deleted file mode 100644 index 60185dac239..00000000000 --- a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.h +++ /dev/null @@ -1,5871 +0,0 @@ -/* - * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. - * - * NOTICE TO USER: - * - * This source code is subject to NVIDIA ownership rights under U.S. and - * international Copyright laws. Users and possessors of this source code - * are hereby granted a nonexclusive, royalty-free license to use this code - * in individual and commercial software. - * - * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE - * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR - * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH - * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. - * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, - * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS - * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE - * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE - * OR PERFORMANCE OF THIS SOURCE CODE. - * - * U.S. Government End Users. This source code is a "commercial item" as - * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of - * "commercial computer software" and "commercial computer software - * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) - * and is provided to the U.S. Government only as a commercial end item. - * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through - * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the - * source code with only those rights set forth herein. - * - * Any use of this source code in individual and commercial software must - * include, in the user documentation and internal comments to the code, - * the above Disclaimer and U.S. Government End Users Notice. - */ - -/* -NVML API Reference - -The NVIDIA Management Library (NVML) is a C-based programmatic interface for monitoring and -managing various states within NVIDIA Tesla &tm; GPUs. It is intended to be a platform for building -3rd party applications, and is also the underlying library for the NVIDIA-supported nvidia-smi -tool. NVML is thread-safe so it is safe to make simultaneous NVML calls from multiple threads. - -API Documentation - -Supported platforms: -- Windows: Windows Server 2008 R2 64bit, Windows Server 2012 R2 64bit, Windows 7 64bit, Windows 8 64bit, Windows 10 64bit -- Linux: 32-bit and 64-bit -- Hypervisors: Windows Server 2008R2/2012 Hyper-V 64bit, Citrix XenServer 6.2 SP1+, VMware ESX 5.1/5.5 - -Supported products: -- Full Support - - All Tesla products, starting with the Fermi architecture - - All Quadro products, starting with the Fermi architecture - - All GRID products, starting with the Kepler architecture - - Selected GeForce Titan products -- Limited Support - - All Geforce products, starting with the Fermi architecture - -The NVML library can be found at \%ProgramW6432\%\\"NVIDIA Corporation"\\NVSMI\\ on Windows. It is -not be added to the system path by default. To dynamically link to NVML, add this path to the PATH -environmental variable. To dynamically load NVML, call LoadLibrary with this path. - -On Linux the NVML library will be found on the standard library path. 
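For contrast, the gpu-monitoring-tools bindings removed above were consumed through error-returning helpers that populate a *Device directly. A rough sketch of that older pattern, based on the deleted Init, GetDeviceCount and NewDeviceLite shown earlier (illustrative fragment; names match the removed package):

package nvmlexample

import (
	"fmt"

	"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml"
)

// listGPUs shows the call pattern this change removes: Go errors instead of
// Return codes, and device metadata (UUID, /dev path) pre-populated on the struct.
func listGPUs() error {
	if err := nvml.Init(); err != nil {
		return err
	}
	defer nvml.Shutdown()

	count, err := nvml.GetDeviceCount()
	if err != nil {
		return err
	}
	for i := uint(0); i < count; i++ {
		dev, err := nvml.NewDeviceLite(i)
		if err != nil {
			return err
		}
		fmt.Println(dev.UUID, dev.Path)
	}
	return nil
}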
For 64 bit Linux, both the 32 bit -and 64 bit NVML libraries will be installed. - -Online documentation for this library is available at http://docs.nvidia.com/deploy/nvml-api/index.html -*/ - -#ifndef __nvml_nvml_h__ -#define __nvml_nvml_h__ - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * On Windows, set up methods for DLL export - * define NVML_STATIC_IMPORT when using nvml_loader library - */ -#if defined _WINDOWS - #if !defined NVML_STATIC_IMPORT - #if defined NVML_LIB_EXPORT - #define DECLDIR __declspec(dllexport) - #else - #define DECLDIR __declspec(dllimport) - #endif - #else - #define DECLDIR - #endif -#else - #define DECLDIR -#endif - -/** - * NVML API versioning support - */ -#define NVML_API_VERSION 9 -#define NVML_API_VERSION_STR "9" -#define nvmlInit nvmlInit_v2 -#define nvmlDeviceGetPciInfo nvmlDeviceGetPciInfo_v3 -#define nvmlDeviceGetCount nvmlDeviceGetCount_v2 -#define nvmlDeviceGetHandleByIndex nvmlDeviceGetHandleByIndex_v2 -#define nvmlDeviceGetHandleByPciBusId nvmlDeviceGetHandleByPciBusId_v2 -#define nvmlDeviceGetNvLinkRemotePciInfo nvmlDeviceGetNvLinkRemotePciInfo_v2 -#define nvmlDeviceRemoveGpu nvmlDeviceRemoveGpu_v2 - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceStructs Device Structs - * @{ - */ -/***************************************************************************************************/ - -/** - * Special constant that some fields take when they are not available. - * Used when only part of the struct is not available. - * - * Each structure explicitly states when to check for this value. - */ -#define NVML_VALUE_NOT_AVAILABLE (-1) - -typedef struct nvmlDevice_st* nvmlDevice_t; - -/** - * Buffer size guaranteed to be large enough for pci bus id - */ -#define NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE 32 - -/** - * Buffer size guaranteed to be large enough for pci bus id for ::busIdLegacy - */ -#define NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE 16 - -/** - * PCI information about a GPU device. - */ -typedef struct nvmlPciInfo_st -{ - char busIdLegacy[NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE]; //!< The legacy tuple domain:bus:device.function PCI identifier (& NULL terminator) - unsigned int domain; //!< The PCI domain on which the device's bus resides, 0 to 0xffffffff - unsigned int bus; //!< The bus on which the device resides, 0 to 0xff - unsigned int device; //!< The device's id on the bus, 0 to 31 - unsigned int pciDeviceId; //!< The combined 16-bit device id and 16-bit vendor id - - // Added in NVML 2.285 API - unsigned int pciSubSystemId; //!< The 32-bit Sub System Device ID - - char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE]; //!< The tuple domain:bus:device.function PCI identifier (& NULL terminator) -} nvmlPciInfo_t; - -/** - * Detailed ECC error counts for a device. - * - * @deprecated Different GPU families can have different memory error counters - * See \ref nvmlDeviceGetMemoryErrorCounter - */ -typedef struct nvmlEccErrorCounts_st -{ - unsigned long long l1Cache; //!< L1 cache errors - unsigned long long l2Cache; //!< L2 cache errors - unsigned long long deviceMemory; //!< Device memory errors - unsigned long long registerFile; //!< Register file errors -} nvmlEccErrorCounts_t; - -/** - * Utilization information for a device. - * Each sample period may be between 1 second and 1/6 second, depending on the product being queried. 
- */ -typedef struct nvmlUtilization_st -{ - unsigned int gpu; //!< Percent of time over the past sample period during which one or more kernels was executing on the GPU - unsigned int memory; //!< Percent of time over the past sample period during which global (device) memory was being read or written -} nvmlUtilization_t; - -/** - * Memory allocation information for a device. - */ -typedef struct nvmlMemory_st -{ - unsigned long long total; //!< Total installed FB memory (in bytes) - unsigned long long free; //!< Unallocated FB memory (in bytes) - unsigned long long used; //!< Allocated FB memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping -} nvmlMemory_t; - -/** - * BAR1 Memory allocation Information for a device - */ -typedef struct nvmlBAR1Memory_st -{ - unsigned long long bar1Total; //!< Total BAR1 Memory (in bytes) - unsigned long long bar1Free; //!< Unallocated BAR1 Memory (in bytes) - unsigned long long bar1Used; //!< Allocated Used Memory (in bytes) -}nvmlBAR1Memory_t; - -/** - * Information about running compute processes on the GPU - */ -typedef struct nvmlProcessInfo_st -{ - unsigned int pid; //!< Process ID - unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. - //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported - //! because Windows KMD manages all the memory and not the NVIDIA driver -} nvmlProcessInfo_t; - -/** - * Enum to represent type of bridge chip - */ -typedef enum nvmlBridgeChipType_enum -{ - NVML_BRIDGE_CHIP_PLX = 0, - NVML_BRIDGE_CHIP_BRO4 = 1 -}nvmlBridgeChipType_t; - -/** - * Maximum number of NvLink links supported - */ -#define NVML_NVLINK_MAX_LINKS 6 - -/** - * Enum to represent the NvLink utilization counter packet units - */ -typedef enum nvmlNvLinkUtilizationCountUnits_enum -{ - NVML_NVLINK_COUNTER_UNIT_CYCLES = 0, // count by cycles - NVML_NVLINK_COUNTER_UNIT_PACKETS = 1, // count by packets - NVML_NVLINK_COUNTER_UNIT_BYTES = 2, // count by bytes - - // this must be last - NVML_NVLINK_COUNTER_UNIT_COUNT -} nvmlNvLinkUtilizationCountUnits_t; - -/** - * Enum to represent the NvLink utilization counter packet types to count - * ** this is ONLY applicable with the units as packets or bytes - * ** as specified in \a nvmlNvLinkUtilizationCountUnits_t - * ** all packet filter descriptions are target GPU centric - * ** these can be "OR'd" together - */ -typedef enum nvmlNvLinkUtilizationCountPktTypes_enum -{ - NVML_NVLINK_COUNTER_PKTFILTER_NOP = 0x1, // no operation packets - NVML_NVLINK_COUNTER_PKTFILTER_READ = 0x2, // read packets - NVML_NVLINK_COUNTER_PKTFILTER_WRITE = 0x4, // write packets - NVML_NVLINK_COUNTER_PKTFILTER_RATOM = 0x8, // reduction atomic requests - NVML_NVLINK_COUNTER_PKTFILTER_NRATOM = 0x10, // non-reduction atomic requests - NVML_NVLINK_COUNTER_PKTFILTER_FLUSH = 0x20, // flush requests - NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA = 0x40, // responses with data - NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80, // responses without data - NVML_NVLINK_COUNTER_PKTFILTER_ALL = 0xFF // all packets -} nvmlNvLinkUtilizationCountPktTypes_t; - -/** - * Struct to define the NVLINK counter controls - */ -typedef struct nvmlNvLinkUtilizationControl_st -{ - nvmlNvLinkUtilizationCountUnits_t units; - nvmlNvLinkUtilizationCountPktTypes_t pktfilter; -} nvmlNvLinkUtilizationControl_t; - -/** - * Enum to represent NvLink queryable capabilities - */ -typedef enum nvmlNvLinkCapability_enum -{ - NVML_NVLINK_CAP_P2P_SUPPORTED = 0, // P2P over NVLink is supported - 
NVML_NVLINK_CAP_SYSMEM_ACCESS = 1, // Access to system memory is supported - NVML_NVLINK_CAP_P2P_ATOMICS = 2, // P2P atomics are supported - NVML_NVLINK_CAP_SYSMEM_ATOMICS= 3, // System memory atomics are supported - NVML_NVLINK_CAP_SLI_BRIDGE = 4, // SLI is supported over this link - NVML_NVLINK_CAP_VALID = 5, // Link is supported on this device - // should be last - NVML_NVLINK_CAP_COUNT -} nvmlNvLinkCapability_t; - -/** - * Enum to represent NvLink queryable error counters - */ -typedef enum nvmlNvLinkErrorCounter_enum -{ - NVML_NVLINK_ERROR_DL_REPLAY = 0, // Data link transmit replay error counter - NVML_NVLINK_ERROR_DL_RECOVERY = 1, // Data link transmit recovery error counter - NVML_NVLINK_ERROR_DL_CRC_FLIT = 2, // Data link receive flow control digit CRC error counter - NVML_NVLINK_ERROR_DL_CRC_DATA = 3, // Data link receive data CRC error counter - - // this must be last - NVML_NVLINK_ERROR_COUNT -} nvmlNvLinkErrorCounter_t; - -/** - * Represents level relationships within a system between two GPUs - * The enums are spaced to allow for future relationships - */ -typedef enum nvmlGpuLevel_enum -{ - NVML_TOPOLOGY_INTERNAL = 0, // e.g. Tesla K80 - NVML_TOPOLOGY_SINGLE = 10, // all devices that only need traverse a single PCIe switch - NVML_TOPOLOGY_MULTIPLE = 20, // all devices that need not traverse a host bridge - NVML_TOPOLOGY_HOSTBRIDGE = 30, // all devices that are connected to the same host bridge - NVML_TOPOLOGY_NODE = 40, // all devices that are connected to the same NUMA node but possibly multiple host bridges - NVML_TOPOLOGY_SYSTEM = 50, // all devices in the system - - // there is purposefully no COUNT here because of the need for spacing above -} nvmlGpuTopologyLevel_t; - -/* Compatibility for CPU->NODE renaming */ -#define NVML_TOPOLOGY_CPU NVML_TOPOLOGY_NODE - -/* P2P Capability Index Status*/ -typedef enum nvmlGpuP2PStatus_enum -{ - NVML_P2P_STATUS_OK = 0, - NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED, - NVML_P2P_STATUS_GPU_NOT_SUPPORTED, - NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED, - NVML_P2P_STATUS_DISABLED_BY_REGKEY, - NVML_P2P_STATUS_NOT_SUPPORTED, - NVML_P2P_STATUS_UNKNOWN - -} nvmlGpuP2PStatus_t; - -/* P2P Capability Index*/ -typedef enum nvmlGpuP2PCapsIndex_enum -{ - NVML_P2P_CAPS_INDEX_READ = 0, - NVML_P2P_CAPS_INDEX_WRITE, - NVML_P2P_CAPS_INDEX_NVLINK, - NVML_P2P_CAPS_INDEX_ATOMICS, - NVML_P2P_CAPS_INDEX_PROP, - NVML_P2P_CAPS_INDEX_UNKNOWN -}nvmlGpuP2PCapsIndex_t; - -/** - * Maximum limit on Physical Bridges per Board - */ -#define NVML_MAX_PHYSICAL_BRIDGE (128) - -/** - * Information about the Bridge Chip Firmware - */ -typedef struct nvmlBridgeChipInfo_st -{ - nvmlBridgeChipType_t type; //!< Type of Bridge Chip - unsigned int fwVersion; //!< Firmware Version. 0=Version is unavailable -}nvmlBridgeChipInfo_t; - -/** - * This structure stores the complete Hierarchy of the Bridge Chip within the board. The immediate - * bridge is stored at index 0 of bridgeInfoList, parent to immediate bridge is at index 1 and so forth. 
- */ -typedef struct nvmlBridgeChipHierarchy_st -{ - unsigned char bridgeCount; //!< Number of Bridge Chips on the Board - nvmlBridgeChipInfo_t bridgeChipInfo[NVML_MAX_PHYSICAL_BRIDGE]; //!< Hierarchy of Bridge Chips on the board -}nvmlBridgeChipHierarchy_t; - -/** - * Represents Type of Sampling Event - */ -typedef enum nvmlSamplingType_enum -{ - NVML_TOTAL_POWER_SAMPLES = 0, //!< To represent total power drawn by GPU - NVML_GPU_UTILIZATION_SAMPLES = 1, //!< To represent percent of time during which one or more kernels was executing on the GPU - NVML_MEMORY_UTILIZATION_SAMPLES = 2, //!< To represent percent of time during which global (device) memory was being read or written - NVML_ENC_UTILIZATION_SAMPLES = 3, //!< To represent percent of time during which NVENC remains busy - NVML_DEC_UTILIZATION_SAMPLES = 4, //!< To represent percent of time during which NVDEC remains busy - NVML_PROCESSOR_CLK_SAMPLES = 5, //!< To represent processor clock samples - NVML_MEMORY_CLK_SAMPLES = 6, //!< To represent memory clock samples - - // Keep this last - NVML_SAMPLINGTYPE_COUNT -}nvmlSamplingType_t; - -/** - * Represents the queryable PCIe utilization counters - */ -typedef enum nvmlPcieUtilCounter_enum -{ - NVML_PCIE_UTIL_TX_BYTES = 0, // 1KB granularity - NVML_PCIE_UTIL_RX_BYTES = 1, // 1KB granularity - - // Keep this last - NVML_PCIE_UTIL_COUNT -} nvmlPcieUtilCounter_t; - -/** - * Represents the type for sample value returned - */ -typedef enum nvmlValueType_enum -{ - NVML_VALUE_TYPE_DOUBLE = 0, - NVML_VALUE_TYPE_UNSIGNED_INT = 1, - NVML_VALUE_TYPE_UNSIGNED_LONG = 2, - NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3, - NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4, - - // Keep this last - NVML_VALUE_TYPE_COUNT -}nvmlValueType_t; - - -/** - * Union to represent different types of Value - */ -typedef union nvmlValue_st -{ - double dVal; //!< If the value is double - unsigned int uiVal; //!< If the value is unsigned int - unsigned long ulVal; //!< If the value is unsigned long - unsigned long long ullVal; //!< If the value is unsigned long long - signed long long sllVal; //!< If the value is signed long long -}nvmlValue_t; - -/** - * Information for Sample - */ -typedef struct nvmlSample_st -{ - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - nvmlValue_t sampleValue; //!< Sample Value -}nvmlSample_t; - -/** - * Represents type of perf policy for which violation times can be queried - */ -typedef enum nvmlPerfPolicyType_enum -{ - NVML_PERF_POLICY_POWER = 0, //!< How long did power violations cause the GPU to be below application clocks - NVML_PERF_POLICY_THERMAL = 1, //!< How long did thermal violations cause the GPU to be below application clocks - NVML_PERF_POLICY_SYNC_BOOST = 2, //!< How long did sync boost cause the GPU to be below application clocks - NVML_PERF_POLICY_BOARD_LIMIT = 3, //!< How long did the board limit cause the GPU to be below application clocks - NVML_PERF_POLICY_LOW_UTILIZATION = 4, //!< How long did low utilization cause the GPU to be below application clocks - NVML_PERF_POLICY_RELIABILITY = 5, //!< How long did the board reliability limit cause the GPU to be below application clocks - - NVML_PERF_POLICY_TOTAL_APP_CLOCKS = 10, //!< Total time the GPU was held below application clocks by any limiter (0 - 5 above) - NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11, //!< Total time the GPU was held below base clocks - - // Keep this last - NVML_PERF_POLICY_COUNT -}nvmlPerfPolicyType_t; - -/** - * Struct to hold perf policy violation status data - */ -typedef struct nvmlViolationTime_st 
-{ - unsigned long long referenceTime; //!< referenceTime represents CPU timestamp in microseconds - unsigned long long violationTime; //!< violationTime in Nanoseconds -}nvmlViolationTime_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceEnumvs Device Enums - * @{ - */ -/***************************************************************************************************/ - -/** - * Generic enable/disable enum. - */ -typedef enum nvmlEnableState_enum -{ - NVML_FEATURE_DISABLED = 0, //!< Feature disabled - NVML_FEATURE_ENABLED = 1 //!< Feature enabled -} nvmlEnableState_t; - -//! Generic flag used to specify the default behavior of some functions. See description of particular functions for details. -#define nvmlFlagDefault 0x00 -//! Generic flag used to force some behavior. See description of particular functions for details. -#define nvmlFlagForce 0x01 - -/** - * * The Brand of the GPU - * */ -typedef enum nvmlBrandType_enum -{ - NVML_BRAND_UNKNOWN = 0, - NVML_BRAND_QUADRO = 1, - NVML_BRAND_TESLA = 2, - NVML_BRAND_NVS = 3, - NVML_BRAND_GRID = 4, - NVML_BRAND_GEFORCE = 5, - NVML_BRAND_TITAN = 6, - - // Keep this last - NVML_BRAND_COUNT -} nvmlBrandType_t; - -/** - * Temperature thresholds. - */ -typedef enum nvmlTemperatureThresholds_enum -{ - NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0, // Temperature at which the GPU will shut down - // for HW protection - NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1, // Temperature at which the GPU will begin HW slowdown - NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2, // Memory Temperature at which the GPU will begin SW slowdown - NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3, // GPU Temperature at which the GPU can be throttled below base clock - // Keep this last - NVML_TEMPERATURE_THRESHOLD_COUNT -} nvmlTemperatureThresholds_t; - -/** - * Temperature sensors. - */ -typedef enum nvmlTemperatureSensors_enum -{ - NVML_TEMPERATURE_GPU = 0, //!< Temperature sensor for the GPU die - - // Keep this last - NVML_TEMPERATURE_COUNT -} nvmlTemperatureSensors_t; - -/** - * Compute mode. - * - * NVML_COMPUTEMODE_EXCLUSIVE_PROCESS was added in CUDA 4.0. - * Earlier CUDA versions supported a single exclusive mode, - * which is equivalent to NVML_COMPUTEMODE_EXCLUSIVE_THREAD in CUDA 4.0 and beyond. - */ -typedef enum nvmlComputeMode_enum -{ - NVML_COMPUTEMODE_DEFAULT = 0, //!< Default compute mode -- multiple contexts per device - NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1, //!< Support Removed - NVML_COMPUTEMODE_PROHIBITED = 2, //!< Compute-prohibited mode -- no contexts per device - NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3, //!< Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time - - // Keep this last - NVML_COMPUTEMODE_COUNT -} nvmlComputeMode_t; - -/** - * ECC bit types. 
- * - * @deprecated See \ref nvmlMemoryErrorType_t for a more flexible type - */ -#define nvmlEccBitType_t nvmlMemoryErrorType_t - -/** - * Single bit ECC errors - * - * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_CORRECTED - */ -#define NVML_SINGLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_CORRECTED - -/** - * Double bit ECC errors - * - * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_UNCORRECTED - */ -#define NVML_DOUBLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_UNCORRECTED - -/** - * Memory error types - */ -typedef enum nvmlMemoryErrorType_enum -{ - /** - * A memory error that was corrected - * - * For ECC errors, these are single bit errors - * For Texture memory, these are errors fixed by resend - */ - NVML_MEMORY_ERROR_TYPE_CORRECTED = 0, - /** - * A memory error that was not corrected - * - * For ECC errors, these are double bit errors - * For Texture memory, these are errors where the resend fails - */ - NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1, - - - // Keep this last - NVML_MEMORY_ERROR_TYPE_COUNT //!< Count of memory error types - -} nvmlMemoryErrorType_t; - -/** - * ECC counter types. - * - * Note: Volatile counts are reset each time the driver loads. On Windows this is once per boot. On Linux this can be more frequent. - * On Linux the driver unloads when no active clients exist. If persistence mode is enabled or there is always a driver - * client active (e.g. X11), then Linux also sees per-boot behavior. If not, volatile counts are reset each time a compute app - * is run. - */ -typedef enum nvmlEccCounterType_enum -{ - NVML_VOLATILE_ECC = 0, //!< Volatile counts are reset each time the driver loads. - NVML_AGGREGATE_ECC = 1, //!< Aggregate counts persist across reboots (i.e. for the lifetime of the device) - - // Keep this last - NVML_ECC_COUNTER_TYPE_COUNT //!< Count of memory counter types -} nvmlEccCounterType_t; - -/** - * Clock types. - * - * All speeds are in Mhz. - */ -typedef enum nvmlClockType_enum -{ - NVML_CLOCK_GRAPHICS = 0, //!< Graphics clock domain - NVML_CLOCK_SM = 1, //!< SM clock domain - NVML_CLOCK_MEM = 2, //!< Memory clock domain - NVML_CLOCK_VIDEO = 3, //!< Video encoder/decoder clock domain - - // Keep this last - NVML_CLOCK_COUNT //usedGpuMemory is not supported - - - unsigned long long time; //!< Amount of time in ms during which the compute context was active. The time is reported as 0 if - //!< the process is not terminated - - unsigned long long startTime; //!< CPU Timestamp in usec representing start time for the process - - unsigned int isRunning; //!< Flag to represent if the process is running (1 for running, 0 for terminated) - - unsigned int reserved[5]; //!< Reserved for future use -} nvmlAccountingStats_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpuConstants Vgpu Constants - * @{ - */ -/***************************************************************************************************/ - -/** - * Buffer size guaranteed to be large enough for \ref nvmlVgpuTypeGetLicense - */ -#define NVML_GRID_LICENSE_BUFFER_SIZE 128 - -#define NVML_VGPU_NAME_BUFFER_SIZE 64 - -#define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 - -/*! - * Macros for pGPU's virtualization capabilities bitfield. 
- */ -#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION 0:0 -#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_NO 0x0 -#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_YES 0x1 - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpuEnum Vgpu Enum - * @{ - */ -/***************************************************************************************************/ - -/*! - * Types of VM identifiers - */ -typedef enum nvmlVgpuVmIdType { - NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID - NVML_VGPU_VM_ID_UUID = 1, //!< VM ID represents UUID -} nvmlVgpuVmIdType_t; - -// vGPU GUEST info state. -typedef enum nvmlVgpuGuestInfoState_enum -{ - NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //= 0 and < \a unitCount - * @param unit Reference in which to return the unit handle - * - * @return - * - \ref NVML_SUCCESS if \a unit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t *unit); - -/** - * Retrieves the static information associated with a unit. - * - * For S-class products. - * - * See \ref nvmlUnitInfo_t for details on available unit info. - * - * @param unit The identifier of the target unit - * @param info Reference in which to return the unit information - * - * @return - * - \ref NVML_SUCCESS if \a info has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL - */ -nvmlReturn_t DECLDIR nvmlUnitGetUnitInfo(nvmlUnit_t unit, nvmlUnitInfo_t *info); - -/** - * Retrieves the LED state associated with this unit. - * - * For S-class products. - * - * See \ref nvmlLedState_t for details on allowed states. - * - * @param unit The identifier of the target unit - * @param state Reference in which to return the current LED state - * - * @return - * - \ref NVML_SUCCESS if \a state has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a state is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlUnitSetLedState() - */ -nvmlReturn_t DECLDIR nvmlUnitGetLedState(nvmlUnit_t unit, nvmlLedState_t *state); - -/** - * Retrieves the PSU stats for the unit. - * - * For S-class products. - * - * See \ref nvmlPSUInfo_t for details on available PSU info. - * - * @param unit The identifier of the target unit - * @param psu Reference in which to return the PSU information - * - * @return - * - \ref NVML_SUCCESS if \a psu has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetPsuInfo(nvmlUnit_t unit, nvmlPSUInfo_t *psu); - -/** - * Retrieves the temperature readings for the unit, in degrees C. - * - * For S-class products. 
- * - * Depending on the product, readings may be available for intake (type=0), - * exhaust (type=1) and board (type=2). - * - * @param unit The identifier of the target unit - * @param type The type of reading to take - * @param temp Reference in which to return the intake temperature - * - * @return - * - \ref NVML_SUCCESS if \a temp has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetTemperature(nvmlUnit_t unit, unsigned int type, unsigned int *temp); - -/** - * Retrieves the fan speed readings for the unit. - * - * For S-class products. - * - * See \ref nvmlUnitFanSpeeds_t for details on available fan speed info. - * - * @param unit The identifier of the target unit - * @param fanSpeeds Reference in which to return the fan speed information - * - * @return - * - \ref NVML_SUCCESS if \a fanSpeeds has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a fanSpeeds is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetFanSpeedInfo(nvmlUnit_t unit, nvmlUnitFanSpeeds_t *fanSpeeds); - -/** - * Retrieves the set of GPU devices that are attached to the specified unit. - * - * For S-class products. - * - * The \a deviceCount argument is expected to be set to the size of the input \a devices array. - * - * @param unit The identifier of the target unit - * @param deviceCount Reference in which to provide the \a devices array size, and - * to return the number of attached GPU devices - * @param devices Reference in which to return the references to the attached GPU devices - * - * @return - * - \ref NVML_SUCCESS if \a deviceCount and \a devices have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a deviceCount indicates that the \a devices array is too small - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid, either of \a deviceCount or \a devices is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetDevices(nvmlUnit_t unit, unsigned int *deviceCount, nvmlDevice_t *devices); - -/** - * Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system. - * - * For S-class products. - * - * The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array. - * The HIC must be connected to an S-class system for it to be reported by this function. 
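The unit queries above apply only to S-class systems. A minimal sketch of reading the intake, exhaust and board temperatures of the first unit; nvmlInit(), nvmlShutdown() and nvmlUnitGetCount() are assumed from elsewhere in this header:

#include <stdio.h>
#include <nvml.h>

/* Sketch: print the temperature readings of unit 0, if any S-class unit exists. */
int main(void)
{
    unsigned int unitCount = 0;
    nvmlUnit_t unit;

    if (nvmlInit() != NVML_SUCCESS)
        return 1;

    /* nvmlUnitGetCount() is assumed to be declared elsewhere in this header. */
    if (nvmlUnitGetCount(&unitCount) == NVML_SUCCESS && unitCount > 0 &&
        nvmlUnitGetHandleByIndex(0, &unit) == NVML_SUCCESS) {
        static const char *kinds[] = { "intake", "exhaust", "board" };
        for (unsigned int type = 0; type < 3; type++) {
            unsigned int temp = 0;
            if (nvmlUnitGetTemperature(unit, type, &temp) == NVML_SUCCESS)
                printf("unit 0 %s: %u C\n", kinds[type], temp);
        }
    }

    nvmlShutdown();
    return 0;
}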
- * - * @param hwbcCount Size of hwbcEntries array - * @param hwbcEntries Array holding information about hwbc - * - * @return - * - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small - */ -nvmlReturn_t DECLDIR nvmlSystemGetHicVersion(unsigned int *hwbcCount, nvmlHwbcEntry_t *hwbcEntries); -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceQueries Device Queries - * This chapter describes that queries that NVML can perform against each device. - * In each case the device is identified with an nvmlDevice_t handle. This handle is obtained by - * calling one of \ref nvmlDeviceGetHandleByIndex(), \ref nvmlDeviceGetHandleBySerial(), - * \ref nvmlDeviceGetHandleByPciBusId(). or \ref nvmlDeviceGetHandleByUUID(). - * @{ - */ -/***************************************************************************************************/ - - /** - * Retrieves the number of compute devices in the system. A compute device is a single GPU. - * - * For all products. - * - * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system - * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. - * Update your code to handle this error, or use NVML 4.304 or older nvml header file. - * For backward binary compatibility reasons _v1 version of the API is still present in the shared - * library. - * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. - * - * @param deviceCount Reference in which to return the number of accessible devices - * - * @return - * - \ref NVML_SUCCESS if \a deviceCount has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCount(unsigned int *deviceCount); - -/** - * Acquire the handle for a particular device, based on its index. - * - * For all products. - * - * Valid indices are derived from the \a accessibleDevices count returned by - * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices - * are 0 and 1, corresponding to GPU 0 and GPU 1. - * - * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it - * is recommended that devices be looked up by their PCI ids or UUID. See - * \ref nvmlDeviceGetHandleByUUID() and \ref nvmlDeviceGetHandleByPciBusId(). - * - * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs if: - * - The target GPU is an SLI slave - * - * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system - * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. - * Update your code to handle this error, or use NVML 4.304 or older nvml header file. 
- * For backward binary compatibility reasons _v1 version of the API is still present in the shared - * library. - * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. - * - * This means that nvmlDeviceGetHandleByIndex_v2 and _v1 can return different devices for the same index. - * If you don't touch macros that map old (_v1) versions to _v2 versions at the top of the file you don't - * need to worry about that. - * - * @param index The index of the target GPU, >= 0 and < \a accessibleDevices - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a device is NULL - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetIndex - * @see nvmlDeviceGetCount - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex(unsigned int index, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its board serial number. - * - * For Fermi &tm; or newer fully supported devices. - * - * This number corresponds to the value printed directly on the board, and to the value returned by - * \ref nvmlDeviceGetSerial(). - * - * @deprecated Since more than one GPU can exist on a single board this function is deprecated in favor - * of \ref nvmlDeviceGetHandleByUUID. - * For dual GPU boards this function will return NVML_ERROR_INVALID_ARGUMENT. - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs as it searches for the target GPU - * - * @param serial The board serial number of the target GPU - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a serial is invalid, \a device is NULL or more than one - * device has the same serial (dual GPU boards) - * - \ref NVML_ERROR_NOT_FOUND if \a serial does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetSerial - * @see nvmlDeviceGetHandleByUUID - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device. - * - * For all products. 
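A minimal sketch of the enumeration pattern these comments describe: count the accessible devices, acquire each handle by index, and read the product name. nvmlInit(), nvmlShutdown() and nvmlErrorString() are assumed from elsewhere in this header; as the comments note, stable lookups should prefer the UUID or PCI bus id over the index.

#include <stdio.h>
#include <nvml.h>

/* Sketch: enumerate every accessible GPU and print its index and product name. */
int main(void)
{
    nvmlReturn_t ret = nvmlInit();           /* assumed from elsewhere in this header */
    if (ret != NVML_SUCCESS) {
        fprintf(stderr, "nvmlInit failed: %s\n", nvmlErrorString(ret));
        return 1;
    }

    unsigned int count = 0;
    if (nvmlDeviceGetCount(&count) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++) {
            nvmlDevice_t dev;
            char name[NVML_DEVICE_NAME_BUFFER_SIZE];

            /* May fail with NVML_ERROR_NO_PERMISSION for devices we cannot access. */
            if (nvmlDeviceGetHandleByIndex(i, &dev) != NVML_SUCCESS)
                continue;
            if (nvmlDeviceGetName(dev, name, sizeof(name)) == NVML_SUCCESS)
                printf("GPU %u: %s\n", i, name);
        }
    }

    nvmlShutdown();                          /* assumed from elsewhere in this header */
    return 0;
}

The later sketches below reuse a device handle obtained this way.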
- * - * @param uuid The UUID of the target GPU - * @param device Reference in which to return the device handle - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs as it searches for the target GPU - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid or \a device is null - * - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetUUID - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUID(const char *uuid, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its PCI bus id. - * - * For all products. - * - * This value corresponds to the nvmlPciInfo_t::busId returned by \ref nvmlDeviceGetPciInfo(). - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs if: - * - The target GPU is an SLI slave - * - * \note NVML 4.304 and older version of nvmlDeviceGetHandleByPciBusId"_v1" returns NVML_ERROR_NOT_FOUND - * instead of NVML_ERROR_NO_PERMISSION. - * - * @param pciBusId The PCI bus id of the target GPU - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciBusId is invalid or \a device is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a pciBusId does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if the attached device has improperly attached external power cables - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByPciBusId(const char *pciBusId, nvmlDevice_t *device); - -/** - * Retrieves the name of this device. - * - * For all products. - * - * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not - * exceed 64 characters in length (including the NULL terminator). See \ref - * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. 
- * - * @param device The identifier of the target device - * @param name Reference in which to return the product name - * @param length The maximum allowed length of the string returned in \a name - * - * @return - * - \ref NVML_SUCCESS if \a name has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetName(nvmlDevice_t device, char *name, unsigned int length); - -/** - * Retrieves the brand of this device. - * - * For all products. - * - * The type is a member of \ref nvmlBrandType_t defined above. - * - * @param device The identifier of the target device - * @param type Reference in which to return the product brand type - * - * @return - * - \ref NVML_SUCCESS if \a name has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a type is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBrand(nvmlDevice_t device, nvmlBrandType_t *type); - -/** - * Retrieves the NVML index of this device. - * - * For all products. - * - * Valid indices are derived from the \a accessibleDevices count returned by - * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices - * are 0 and 1, corresponding to GPU 0 and GPU 1. - * - * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it - * is recommended that devices be looked up by their PCI ids or GPU UUID. See - * \ref nvmlDeviceGetHandleByPciBusId() and \ref nvmlDeviceGetHandleByUUID(). - * - * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. - * - * @param device The identifier of the target device - * @param index Reference in which to return the NVML index of the device - * - * @return - * - \ref NVML_SUCCESS if \a index has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetHandleByIndex() - * @see nvmlDeviceGetCount() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetIndex(nvmlDevice_t device, unsigned int *index); - -/** - * Retrieves the globally unique board serial number associated with this device's board. - * - * For all products with an inforom. - * - * The serial number is an alphanumeric string that will not exceed 30 characters (including the NULL terminator). - * This number matches the serial number tag that is physically attached to the board. See \ref - * nvmlConstants::NVML_DEVICE_SERIAL_BUFFER_SIZE. 
- * - * @param device The identifier of the target device - * @param serial Reference in which to return the board/module serial number - * @param length The maximum allowed length of the string returned in \a serial - * - * @return - * - \ref NVML_SUCCESS if \a serial has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSerial(nvmlDevice_t device, char *serial, unsigned int length); - -/** - * Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the ideal CPU affinity for the device - * For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2, - * result[0] = 0x3, result[1] = 0x3 - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. - * - * @param device The identifier of the target device - * @param cpuSetSize The size of the cpuSet array that is safe to access - * @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per - * unsigned long on 64-bit machines, 32 on 32-bit machines - * - * @return - * - \ref NVML_SUCCESS if \a cpuAffinity has been filled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, or cpuSet is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long *cpuSet); - -/** - * Sets the ideal affinity for the calling thread and device using the guidelines - * given in nvmlDeviceGetCpuAffinity(). Note, this is a change as of version 8.0. - * Older versions set the affinity for a calling process and all children. - * Currently supports up to 64 processors. - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if the calling process has been successfully bound - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetCpuAffinity(nvmlDevice_t device); - -/** - * Clear all affinity bindings for the calling thread. Note, this is a change as of version - * 8.0 as older versions cleared the affinity for a calling process and all children. - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. 
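A short sketch of the CPU-affinity helpers declared above (Linux only): read the ideal CPU bitmask for a device, then bind the calling thread to it. The two-element mask size is an arbitrary choice for this illustration.

#include <stdio.h>
#include <nvml.h>

/* Sketch: print the ideal CPU affinity mask for a GPU, then bind the calling
 * thread to it (Linux only). `dev` is a handle obtained as in the earlier sketch. */
static void pin_to_ideal_cpus(nvmlDevice_t dev)
{
    unsigned long cpuSet[2] = { 0, 0 };   /* room for 128 CPUs on 64-bit machines */

    if (nvmlDeviceGetCpuAffinity(dev, 2, cpuSet) == NVML_SUCCESS)
        printf("ideal CPUs: 0x%lx 0x%lx\n", cpuSet[0], cpuSet[1]);

    /* Applies the same guidelines to the calling thread (since NVML 8.0). */
    if (nvmlDeviceSetCpuAffinity(dev) != NVML_SUCCESS)
        printf("could not set CPU affinity\n");
}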
- * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if the calling process has been successfully unbound - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceClearCpuAffinity(nvmlDevice_t device); - -/** - * Retrieve the common ancestor for two devices - * For all products. - * Supported on Linux only. - * - * @param device1 The identifier of the first device - * @param device2 The identifier of the second device - * @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type - * - * @return - * - \ref NVML_SUCCESS if \a pathInfo has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t *pathInfo); - -/** - * Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level - * For all products. - * Supported on Linux only. - * - * @param device The identifier of the first device - * @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs - * @param count When zero, is set to the number of matching GPUs such that \a deviceArray - * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count - * number of device handles. - * @param deviceArray An array of device handles for GPUs found at \a level - * - * @return - * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int *count, nvmlDevice_t *deviceArray); - -/** - * Retrieve the set of GPUs that have a CPU affinity with the given CPU number - * For all products. - * Supported on Linux only. - * - * @param cpuNumber The CPU number - * @param count When zero, is set to the number of matching GPUs such that \a deviceArray - * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count - * number of device handles. 
- * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber - * - * @return - * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int *count, nvmlDevice_t *deviceArray); - -/** - * Retrieve the status for a given p2p capability index between a given pair of GPU - * - * @param device1 The first device - * @param device2 The second device - * @param p2pIndex p2p Capability Index being looked for between \a device1 and \a device2 - * @param p2pStatus Reference in which to return the status of the \a p2pIndex - * between \a device1 and \a device2 - * @return - * - \ref NVML_SUCCESS if \a p2pStatus has been populated - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 or \a p2pIndex is invalid or \a p2pStatus is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex,nvmlGpuP2PStatus_t *p2pStatus); - -/** - * Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, - * that augments the immutable, board serial identifier. - * - * For all products. - * - * The UUID is a globally unique identifier. It is the only available identifier for pre-Fermi-architecture products. - * It does NOT correspond to any identifier printed on the board. It will not exceed 80 characters in length - * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * @param device The identifier of the target device - * @param uuid Reference in which to return the GPU UUID - * @param length The maximum allowed length of the string returned in \a uuid - * - * @return - * - \ref NVML_SUCCESS if \a uuid has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a uuid is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length); - -/** - * Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for - * each GPU will have the form /dev/nvidia[minor number]. - * - * For all products. 
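A sketch of the UUID round trip the comments recommend: read a device's UUID, then re-acquire the handle through nvmlDeviceGetHandleByUUID(), which stays stable across reboots. `dev` is a handle obtained as in the earlier enumeration sketch.

#include <stdio.h>
#include <nvml.h>

/* Sketch: read a device's UUID and re-acquire the same handle by that UUID,
 * the stable lookup the comments above recommend over raw indices. */
static void relookup_by_uuid(nvmlDevice_t dev)
{
    char uuid[NVML_DEVICE_UUID_BUFFER_SIZE];
    nvmlDevice_t same;

    if (nvmlDeviceGetUUID(dev, uuid, sizeof(uuid)) != NVML_SUCCESS)
        return;
    if (nvmlDeviceGetHandleByUUID(uuid, &same) == NVML_SUCCESS)
        printf("re-acquired handle for %s\n", uuid);
}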
- * Supported only for Linux - * - * @param device The identifier of the target device - * @param minorNumber Reference in which to return the minor number for the device - * @return - * - \ref NVML_SUCCESS if the minor number is successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minorNumber is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int *minorNumber); - -/** - * Retrieves the the device board part number which is programmed into the board's InfoROM - * - * For all products. - * - * @param device Identifier of the target device - * @param partNumber Reference to the buffer to return - * @param length Length of the buffer reference - * - * @return - * - \ref NVML_SUCCESS if \a partNumber has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NOT_SUPPORTED if the needed VBIOS fields have not been filled - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a serial is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length); - -/** - * Retrieves the version information for the device's infoROM object. - * - * For all products with an inforom. - * - * Fermi and higher parts have non-volatile on-board memory for persisting device info, such as aggregate - * ECC counts. The version of the data structures in this memory may change from time to time. It will not - * exceed 16 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. - * - * See \ref nvmlInforomObject_t for details on the available infoROM objects. - * - * @param device The identifier of the target device - * @param object The target infoROM object - * @param version Reference in which to return the infoROM version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetInforomImageVersion - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char *version, unsigned int length); - -/** - * Retrieves the global infoROM image version - * - * For all products with an inforom. - * - * Image version just like VBIOS version uniquely describes the exact version of the infoROM flashed on the board - * in contrast to infoROM object version which is only an indicator of supported features. 
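Since the comment above ties the minor number to the /dev/nvidia[minor number] node, a small sketch deriving that path (Linux only); `dev` is a handle obtained as in the earlier enumeration sketch.

#include <stdio.h>
#include <nvml.h>

/* Sketch: derive the Linux device node path documented above
 * (/dev/nvidia[minor number]) for a device handle `dev`. */
static void print_device_node(nvmlDevice_t dev)
{
    unsigned int minor = 0;
    char path[32];

    if (nvmlDeviceGetMinorNumber(dev, &minor) == NVML_SUCCESS) {
        snprintf(path, sizeof(path), "/dev/nvidia%u", minor);
        printf("device node: %s\n", path);
    }
}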
- * Version string will not exceed 16 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. - * - * @param device The identifier of the target device - * @param version Reference in which to return the infoROM image version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetInforomVersion - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char *version, unsigned int length); - -/** - * Retrieves the checksum of the configuration stored in the device's infoROM. - * - * For all products with an inforom. - * - * Can be used to make sure that two GPUs have the exact same configuration. - * Current checksum takes into account configuration stored in PWR and ECC infoROM objects. - * Checksum can change between driver releases or when user changes configuration (e.g. disable/enable ECC) - * - * @param device The identifier of the target device - * @param checksum Reference in which to return the infoROM configuration checksum - * - * @return - * - \ref NVML_SUCCESS if \a checksum has been set - * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's checksum couldn't be retrieved due to infoROM corruption - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a checksum is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int *checksum); - -/** - * Reads the infoROM from the flash and verifies the checksums. - * - * For all products with an inforom. - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if infoROM is not corrupted - * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's infoROM is corrupted - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceValidateInforom(nvmlDevice_t device); - -/** - * Retrieves the display mode for the device. - * - * For all products. - * - * This method indicates whether a physical display (e.g. monitor) is currently connected to - * any of the device's connectors. - * - * See \ref nvmlEnableState_t for details on allowed modes. 
- * - * @param device The identifier of the target device - * @param display Reference in which to return the display mode - * - * @return - * - \ref NVML_SUCCESS if \a display has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a display is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t *display); - -/** - * Retrieves the display active state for the device. - * - * For all products. - * - * This method indicates whether a display is initialized on the device. - * For example whether X Server is attached to this device and has allocated memory for the screen. - * - * Display can be active even when no monitor is physically attached. - * - * See \ref nvmlEnableState_t for details on allowed modes. - * - * @param device The identifier of the target device - * @param isActive Reference in which to return the display active state - * - * @return - * - \ref NVML_SUCCESS if \a isActive has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isActive is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t *isActive); - -/** - * Retrieves the persistence mode associated with this device. - * - * For all products. - * For Linux only. - * - * When driver persistence mode is enabled the driver software state is not torn down when the last - * client disconnects. By default this feature is disabled. - * - * See \ref nvmlEnableState_t for details on allowed modes. - * - * @param device The identifier of the target device - * @param mode Reference in which to return the current driver persistence mode - * - * @return - * - \ref NVML_SUCCESS if \a mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetPersistenceMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t *mode); - -/** - * Retrieves the PCI attributes of this device. - * - * For all products. - * - * See \ref nvmlPciInfo_t for details on the available PCI info. 
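A brief sketch using nvmlEnableState_t as described above: report whether driver persistence mode is enabled (Linux only) and whether a display is active on a device handle `dev`.

#include <stdio.h>
#include <nvml.h>

/* Sketch: report persistence mode and display state for a device handle `dev`. */
static void report_modes(nvmlDevice_t dev)
{
    nvmlEnableState_t persistence, displayActive;

    if (nvmlDeviceGetPersistenceMode(dev, &persistence) == NVML_SUCCESS)
        printf("persistence mode: %s\n",
               persistence == NVML_FEATURE_ENABLED ? "on" : "off");

    if (nvmlDeviceGetDisplayActive(dev, &displayActive) == NVML_SUCCESS)
        printf("display active:   %s\n",
               displayActive == NVML_FEATURE_ENABLED ? "yes" : "no");
}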
- * - * @param device The identifier of the target device - * @param pci Reference in which to return the PCI info - * - * @return - * - \ref NVML_SUCCESS if \a pci has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo(nvmlDevice_t device, nvmlPciInfo_t *pci); - -/** - * Retrieves the maximum PCIe link generation possible with this device and system - * - * I.E. for a generation 2 PCIe device attached to a generation 1 PCIe bus the max link generation this function will - * report is generation 1. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param maxLinkGen Reference in which to return the max PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a maxLinkGen has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGen is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGen); - -/** - * Retrieves the maximum PCIe link width possible with this device and system - * - * I.E. for a device with a 16x PCIe bus width attached to a 8x PCIe system bus this function will report - * a max link width of 8. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param maxLinkWidth Reference in which to return the max PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a maxLinkWidth has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkWidth is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int *maxLinkWidth); - -/** - * Retrieves the current PCIe link generation - * - * For Fermi &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param currLinkGen Reference in which to return the current PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a currLinkGen has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkGen is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int *currLinkGen); - -/** - * Retrieves the current PCIe link width - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param currLinkWidth Reference in which to return the current PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a currLinkWidth has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int *currLinkWidth); - -/** - * Retrieve PCIe utilization information. - * This function is querying a byte counter over a 20ms interval and thus is the - * PCIe throughput over that interval. - * - * For Maxwell &tm; or newer fully supported devices. - * - * This method is not supported in virtual machines running virtual GPU (vGPU). - * - * @param device The identifier of the target device - * @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t - * @param value Reference in which to return throughput in KB/s - * - * @return - * - \ref NVML_SUCCESS if \a value has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int *value); - -/** - * Retrieve the PCIe replay counter. - * - * For Kepler &tm; or newer fully supported devices. 
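A sketch combining the PCIe queries above for a device handle `dev`: current versus maximum link generation and width, plus the throughput counters. The NVML_PCIE_UTIL_TX_BYTES and NVML_PCIE_UTIL_RX_BYTES values are assumed from the nvmlPcieUtilCounter_t enum defined earlier in this header.

#include <stdio.h>
#include <nvml.h>

/* Sketch: compare current vs. maximum PCIe link settings and sample the
 * TX/RX throughput counters (KB/s over a ~20 ms window) for handle `dev`. */
static void report_pcie(nvmlDevice_t dev)
{
    unsigned int curGen, maxGen, curWidth, maxWidth, tx, rx;

    if (nvmlDeviceGetCurrPcieLinkGeneration(dev, &curGen) == NVML_SUCCESS &&
        nvmlDeviceGetMaxPcieLinkGeneration(dev, &maxGen) == NVML_SUCCESS &&
        nvmlDeviceGetCurrPcieLinkWidth(dev, &curWidth) == NVML_SUCCESS &&
        nvmlDeviceGetMaxPcieLinkWidth(dev, &maxWidth) == NVML_SUCCESS)
        printf("PCIe gen %u/%u, width x%u/x%u\n", curGen, maxGen, curWidth, maxWidth);

    /* NVML_PCIE_UTIL_TX_BYTES / NVML_PCIE_UTIL_RX_BYTES are assumed from the
     * nvmlPcieUtilCounter_t enum declared earlier in this header. */
    if (nvmlDeviceGetPcieThroughput(dev, NVML_PCIE_UTIL_TX_BYTES, &tx) == NVML_SUCCESS &&
        nvmlDeviceGetPcieThroughput(dev, NVML_PCIE_UTIL_RX_BYTES, &rx) == NVML_SUCCESS)
        printf("PCIe throughput: tx %u KB/s, rx %u KB/s\n", tx, rx);
}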
- * - * @param device The identifier of the target device - * @param value Reference in which to return the counter's value - * - * @return - * - \ref NVML_SUCCESS if \a value and \a rollover have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value or \a rollover are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int *value); - -/** - * Retrieves the current clock speeds for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlClockType_t for details on available clock information. - * - * @param device The identifier of the target device - * @param type Identify which clock domain to query - * @param clock Reference in which to return the clock speed in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clock has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); - -/** - * Retrieves the maximum clock speeds for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlClockType_t for details on available clock information. - * - * \note On GPUs from Fermi family current P0 clocks (reported by \ref nvmlDeviceGetClockInfo) can differ from max clocks - * by few MHz. - * - * @param device The identifier of the target device - * @param type Identify which clock domain to query - * @param clock Reference in which to return the clock speed in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clock has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); - -/** - * Retrieves the current setting of a clock that applications will use unless an overspec situation occurs. - * Can be changed using \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. 
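A sketch pairing nvmlDeviceGetClockInfo() with nvmlDeviceGetMaxClockInfo() for the graphics, SM and memory domains listed in nvmlClockType_t above, for a device handle `dev`.

#include <stdio.h>
#include <nvml.h>

/* Sketch: print current vs. maximum clocks for the graphics, SM and memory
 * clock domains of a device handle `dev`. */
static void report_clocks(nvmlDevice_t dev)
{
    static const nvmlClockType_t domains[] = { NVML_CLOCK_GRAPHICS, NVML_CLOCK_SM, NVML_CLOCK_MEM };
    static const char *names[] = { "graphics", "sm", "mem" };

    for (int i = 0; i < 3; i++) {
        unsigned int cur = 0, max = 0;
        if (nvmlDeviceGetClockInfo(dev, domains[i], &cur) == NVML_SUCCESS &&
            nvmlDeviceGetMaxClockInfo(dev, domains[i], &max) == NVML_SUCCESS)
            printf("%-8s clock: %u MHz (max %u MHz)\n", names[i], cur, max);
    }
}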
- * - * @param device The identifier of the target device - * @param clockType Identify which clock domain to query - * @param clockMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clockMHz has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); - -/** - * Retrieves the default applications clock that GPU boots with or - * defaults to after \ref nvmlDeviceResetApplicationsClocks call. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param clockType Identify which clock domain to query - * @param clockMHz Reference in which to return the default clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clockMHz has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * \see nvmlDeviceGetApplicationsClock - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDefaultApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); - -/** - * Resets the application clock to the default value - * - * This is the applications clock that will be used after system reboot or driver reload. - * Default value is constant, but the current value an be changed using \ref nvmlDeviceSetApplicationsClocks. - * - * On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks, - * this call will unlock clocks. This returns clocks their default behavior ofautomatically boosting above - * base clocks as thermal limits allow. - * - * @see nvmlDeviceGetApplicationsClock - * @see nvmlDeviceSetApplicationsClocks - * - * For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if new settings were successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetApplicationsClocks(nvmlDevice_t device); - -/** - * Retrieves the clock speed for the clock specified by the clock type and clock ID. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param clockType Identify which clock domain to query - * @param clockId Identify which clock in the domain to query - * @param clockMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clockMHz has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetClock(nvmlDevice_t device, nvmlClockType_t clockType, nvmlClockId_t clockId, unsigned int *clockMHz); - -/** - * Retrieves the customer defined maximum boost clock speed specified by the given clock type. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param clockType Identify which clock domain to query - * @param clockMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clockMHz has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or the \a clockType on this device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxCustomerBoostClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); - -/** - * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param count Reference in which to provide the \a clocksMHz array size, and - * to return the number of elements - * @param clocksMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of - * required elements) - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetApplicationsClocks - * @see nvmlDeviceGetSupportedGraphicsClocks - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks(nvmlDevice_t device, unsigned int *count, unsigned int *clocksMHz); - -/** - * Retrieves the list of possible graphics clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param memoryClockMHz Memory clock for which to return possible graphics clocks - * @param count Reference in which to provide the \a clocksMHz array size, and - * to return the number of elements - * @param clocksMHz Reference in which to return the clocks in MHz - * - * @return - * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NOT_FOUND if the specified \a memoryClockMHz is not a supported frequency - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetApplicationsClocks - * @see nvmlDeviceGetSupportedMemoryClocks - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t device, unsigned int memoryClockMHz, unsigned int *count, unsigned int *clocksMHz); - -/** - * Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled - * - * For Kepler &tm; or newer fully supported devices. - * - * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates - * to maximize performance as thermal limits allow. - * - * On Pascal and newer hardware, Auto Aoosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param isEnabled Where to store the current state of Auto Boosted clocks of the target device - * @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will - * revert to when no applications are using the GPU - * - * @return - * - \ref NVML_SUCCESS If \a isEnabled has been been set with the Auto Boosted clocks state of \a device - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t *isEnabled, nvmlEnableState_t *defaultIsEnabled); - -/** - * Try to set the current state of Auto Boosted clocks on a device. - * - * For Kepler &tm; or newer fully supported devices. - * - * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates - * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock - * rates are desired. - * - * Non-root users may use this API by default but can be restricted by root from using this API by calling - * \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS. - * Note: Persistence Mode is required to modify current Auto Boost settings, therefore, it must be enabled. 
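A sketch of the count-and-fill convention these two queries use, for a device handle `dev`: the caller passes the array capacity in `count`, and NVML_ERROR_INSUFFICIENT_SIZE reports the required size when the buffer is too small. The 32-entry buffers are an arbitrary size for this illustration.

#include <stdio.h>
#include <nvml.h>

/* Sketch: list the memory clocks usable with nvmlDeviceSetApplicationsClocks,
 * and the graphics clocks supported at the first of them, for handle `dev`. */
static void report_supported_clocks(nvmlDevice_t dev)
{
    unsigned int memCount = 32, memMHz[32];

    if (nvmlDeviceGetSupportedMemoryClocks(dev, &memCount, memMHz) != NVML_SUCCESS)
        return;

    for (unsigned int i = 0; i < memCount; i++)
        printf("supported memory clock: %u MHz\n", memMHz[i]);

    if (memCount > 0) {
        unsigned int gfxCount = 32, gfxMHz[32];
        if (nvmlDeviceGetSupportedGraphicsClocks(dev, memMHz[0], &gfxCount, gfxMHz) == NVML_SUCCESS)
            for (unsigned int i = 0; i < gfxCount; i++)
                printf("  graphics clock @ %u MHz mem: %u MHz\n", memMHz[0], gfxMHz[i]);
    }
}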
- * - * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param enabled What state to try to set Auto Boosted clocks of the target device to - * - * @return - * - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled); - -/** - * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will - * return to when no compute running processes (e.g. CUDA application which have an active context) are running - * - * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * Requires root/admin permissions. - * - * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates - * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock - * rates are desired. - * - * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param enabled What state to try to set default Auto Boosted clocks of the target device to - * @param flags Flags that change the default behavior. Currently Unused. - * - * @return - * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags); - - -/** - * Retrieves the intended operating speed of the device's fan. - * - * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the - * output will not match the actual fan speed. - * - * For all discrete products with dedicated fans. - * - * The fan speed is expressed as a percent of the maximum, i.e. full speed is 100%. 
- * - * @param device The identifier of the target device - * @param speed Reference in which to return the fan speed percentage - * - * @return - * - \ref NVML_SUCCESS if \a speed has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a speed is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *speed); - -/** - * Retrieves the current temperature readings for the device, in degrees C. - * - * For all products. - * - * See \ref nvmlTemperatureSensors_t for details on available temperature sensors. - * - * @param device The identifier of the target device - * @param sensorType Flag that indicates which sensor reading to retrieve - * @param temp Reference in which to return the temperature reading - * - * @return - * - \ref NVML_SUCCESS if \a temp has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); - -/** - * Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C. - * - * For Kepler &tm; or newer fully supported devices. - * - * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. - * - * @param device The identifier of the target device - * @param thresholdType The type of threshold value queried - * @param temp Reference in which to return the temperature reading - * @return - * - \ref NVML_SUCCESS if \a temp has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); - -/** - * Retrieves the current performance state for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlPstates_t for details on allowed performance states. 
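
The thermal queries documented above have direct counterparts in the go-nvml bindings this change vendors in place of the old header. The following is a minimal, hedged Go sketch, not part of the original header: it assumes go-nvml v0.12 exposes Init, Shutdown, DeviceGetHandleByIndex, Device.GetTemperature, Device.GetFanSpeed and the TEMPERATURE_GPU constant as described in the upstream bindings.

package main

import (
	"fmt"
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	// nvmlInit / nvmlShutdown equivalents; every later query needs a successful Init.
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("nvml init failed: %v", nvml.ErrorString(ret))
	}
	defer nvml.Shutdown()

	dev, ret := nvml.DeviceGetHandleByIndex(0)
	if ret != nvml.SUCCESS {
		log.Fatalf("get device 0: %v", nvml.ErrorString(ret))
	}

	// Counterpart of nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &temp).
	if temp, ret := dev.GetTemperature(nvml.TEMPERATURE_GPU); ret == nvml.SUCCESS {
		fmt.Printf("GPU temperature: %d C\n", temp)
	}
	// Counterpart of nvmlDeviceGetFanSpeed(device, &speed); reported as a percent of maximum.
	if speed, ret := dev.GetFanSpeed(); ret == nvml.SUCCESS {
		fmt.Printf("fan speed: %d%%\n", speed)
	}
}
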
- * - * @param device The identifier of the target device - * @param pState Reference in which to return the performance state reading - * - * @return - * - \ref NVML_SUCCESS if \a pState has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t *pState); - -/** - * Retrieves current clocks throttling reasons. - * - * For all fully supported products. - * - * \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once. - * - * @param device The identifier of the target device - * @param clocksThrottleReasons Reference in which to return bitmask of active clocks throttle - * reasons - * - * @return - * - \ref NVML_SUCCESS if \a clocksThrottleReasons has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksThrottleReasons is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlClocksThrottleReasons - * @see nvmlDeviceGetSupportedClocksThrottleReasons - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice_t device, unsigned long long *clocksThrottleReasons); - -/** - * Retrieves bitmask of supported clocks throttle reasons that can be returned by - * \ref nvmlDeviceGetCurrentClocksThrottleReasons - * - * For all fully supported products. - * - * This method is not supported in virtual machines running virtual GPU (vGPU). - * - * @param device The identifier of the target device - * @param supportedClocksThrottleReasons Reference in which to return bitmask of supported - * clocks throttle reasons - * - * @return - * - \ref NVML_SUCCESS if \a supportedClocksThrottleReasons has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksThrottleReasons is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlClocksThrottleReasons - * @see nvmlDeviceGetCurrentClocksThrottleReasons - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice_t device, unsigned long long *supportedClocksThrottleReasons); - -/** - * Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization. - * - * Retrieve the current performance state for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlPstates_t for details on allowed performance states. 
- *
- * @param device The identifier of the target device
- * @param pState Reference in which to return the performance state reading
- *
- * @return
- * - \ref NVML_SUCCESS if \a pState has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t *pState);
-
-/**
- * This API has been deprecated.
- *
- * Retrieves the power management mode associated with this device.
- *
- * For products from the Fermi family.
- * - Requires \a NVML_INFOROM_POWER version 3.0 or higher.
- *
- * For products from the Kepler or newer families.
- * - Does not require \a NVML_INFOROM_POWER object.
- *
- * This flag indicates whether any power management algorithm is currently active on the device. An
- * enabled state does not necessarily mean the device is being actively throttled -- only that
- * the driver will do so if the appropriate conditions are met.
- *
- * See \ref nvmlEnableState_t for details on allowed modes.
- *
- * @param device The identifier of the target device
- * @param mode Reference in which to return the current power management mode
- *
- * @return
- * - \ref NVML_SUCCESS if \a mode has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementMode(nvmlDevice_t device, nvmlEnableState_t *mode);
-
-/**
- * Retrieves the power management limit associated with this device.
- *
- * For Fermi &tm; or newer fully supported devices.
- *
- * The power limit defines the upper boundary for the card's power draw. If
- * the card's total power draw reaches this limit the power management algorithm kicks in.
- *
- * This reading is only available if power management mode is supported.
- * See \ref nvmlDeviceGetPowerManagementMode.
- *
- * @param device The identifier of the target device
- * @param limit Reference in which to return the power management limit in milliwatts
- *
- * @return
- * - \ref NVML_SUCCESS if \a limit has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int *limit);
-
-/**
- * Retrieves information about possible values of power management limits on this device.
- *
- * For Kepler &tm; or newer fully supported devices.
- * - * @param device The identifier of the target device - * @param minLimit Reference in which to return the minimum power management limit in milliwatts - * @param maxLimit Reference in which to return the maximum power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a minLimit and \a maxLimit have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minLimit or \a maxLimit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetPowerManagementLimit - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice_t device, unsigned int *minLimit, unsigned int *maxLimit); - -/** - * Retrieves default power management limit on this device, in milliwatts. - * Default power management limit is a power management limit that the device boots with. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param defaultLimit Reference in which to return the default power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a defaultLimit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t device, unsigned int *defaultLimit); - -/** - * Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory) - * - * For Fermi &tm; or newer fully supported devices. - * - * On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. - * - * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode. - * - * @param device The identifier of the target device - * @param power Reference in which to return the power usage information - * - * @return - * - \ref NVML_SUCCESS if \a power has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power); - -/** - * Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded - * - * For newer than Pascal &tm; fully supported devices. 
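
The power queries above (current draw plus default and configurable limits) are reachable from Go through the same bindings. A hedged helper sketch, assuming a device handle obtained after a successful nvml.Init() and that go-nvml exposes Device.GetPowerUsage and Device.GetPowerManagementLimitConstraints with milliwatt units as in the C API:

package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// reportPower mirrors nvmlDeviceGetPowerUsage and
// nvmlDeviceGetPowerManagementLimitConstraints; all values are milliwatts.
func reportPower(dev nvml.Device) {
	if mw, ret := dev.GetPowerUsage(); ret == nvml.SUCCESS {
		fmt.Printf("power draw: %.1f W\n", float64(mw)/1000)
	}
	if minMw, maxMw, ret := dev.GetPowerManagementLimitConstraints(); ret == nvml.SUCCESS {
		fmt.Printf("power limit range: %d-%d mW\n", minMw, maxMw)
	}
}
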
- * - * @param device The identifier of the target device - * @param energy Reference in which to return the energy consumption information - * - * @return - * - \ref NVML_SUCCESS if \a energy has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a energy is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support energy readings - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTotalEnergyConsumption(nvmlDevice_t device, unsigned long long *energy); - -/** - * Get the effective power limit that the driver enforces after taking into account all limiters - * - * Note: This can be different from the \ref nvmlDeviceGetPowerManagementLimit if other limits are set elsewhere - * This includes the out of band power limit interface - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The device to communicate with - * @param limit Reference in which to return the power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a limit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t device, unsigned int *limit); - -/** - * Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot). - * - * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. - * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. - * Not supported on Quadro ® and Tesla &tm; C-class products. - * - * @param device The identifier of the target device - * @param current Reference in which to return the current GOM - * @param pending Reference in which to return the pending GOM - * - * @return - * - \ref NVML_SUCCESS if \a mode has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a current or \a pending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlGpuOperationMode_t - * @see nvmlDeviceSetGpuOperationMode - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t *current, nvmlGpuOperationMode_t *pending); - -/** - * Retrieves the amount of used, free and total memory available on the device, in bytes. - * - * For all products. - * - * Enabling ECC reduces the amount of total available memory, due to the extra required parity bits. - * Under WDDM most device memory is allocated and managed on startup by Windows. - * - * Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated - * by all active channels on the device. 
- *
- * See \ref nvmlMemory_t for details on available memory info.
- *
- * @param device The identifier of the target device
- * @param memory Reference in which to return the memory information
- *
- * @return
- * - \ref NVML_SUCCESS if \a memory has been populated
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory);
-
-/**
- * Retrieves the current compute mode for the device.
- *
- * For all products.
- *
- * See \ref nvmlComputeMode_t for details on allowed compute modes.
- *
- * @param device The identifier of the target device
- * @param mode Reference in which to return the current compute mode
- *
- * @return
- * - \ref NVML_SUCCESS if \a mode has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- * @see nvmlDeviceSetComputeMode()
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode(nvmlDevice_t device, nvmlComputeMode_t *mode);
-
-/**
- * Retrieves the CUDA compute capability of the device.
- *
- * For all products.
- *
- * Returns the major and minor compute capability version numbers of the
- * device. The major and minor versions are equivalent to the
- * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR and
- * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR attributes that would be
- * returned by CUDA's cuDeviceGetAttribute().
- *
- * @param device The identifier of the target device
- * @param major Reference in which to return the major CUDA compute capability
- * @param minor Reference in which to return the minor CUDA compute capability
- *
- * @return
- * - \ref NVML_SUCCESS if \a major and \a minor have been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a major or \a minor are NULL
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int *major, int *minor);
-
-/**
- * Retrieves the current and pending ECC modes for the device.
- *
- * For Fermi &tm; or newer fully supported devices.
- * Only applicable to devices with ECC.
- * Requires \a NVML_INFOROM_ECC version 1.0 or higher.
- *
- * Changing ECC modes requires a reboot. The "pending" ECC mode refers to the target mode following
- * the next reboot.
- *
- * See \ref nvmlEnableState_t for details on allowed modes.
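
For the memory and compute-capability queries above, the Go counterparts follow the same shape. A hedged sketch, assuming go-nvml's Device.GetMemoryInfo returns a Memory struct with Total/Free/Used byte counts and Device.GetCudaComputeCapability returns the major and minor version numbers:

package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// reportMemory mirrors nvmlDeviceGetMemoryInfo (byte counts) and
// nvmlDeviceGetCudaComputeCapability (major/minor version numbers).
func reportMemory(dev nvml.Device) {
	if mem, ret := dev.GetMemoryInfo(); ret == nvml.SUCCESS {
		fmt.Printf("memory: %d used / %d total bytes (%d free)\n", mem.Used, mem.Total, mem.Free)
	}
	if major, minor, ret := dev.GetCudaComputeCapability(); ret == nvml.SUCCESS {
		fmt.Printf("CUDA compute capability: %d.%d\n", major, minor)
	}
}
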
- * - * @param device The identifier of the target device - * @param current Reference in which to return the current ECC mode - * @param pending Reference in which to return the pending ECC mode - * - * @return - * - \ref NVML_SUCCESS if \a current and \a pending have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetEccMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEccMode(nvmlDevice_t device, nvmlEnableState_t *current, nvmlEnableState_t *pending); - -/** - * Retrieves the device boardId from 0-N. - * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with - * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. - * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across - * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and - * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will - * always return those values but they will always be different from each other). - * - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param boardId Reference in which to return the device's board ID - * - * @return - * - \ref NVML_SUCCESS if \a boardId has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBoardId(nvmlDevice_t device, unsigned int *boardId); - -/** - * Retrieves whether the device is on a Multi-GPU Board - * Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param multiGpuBool Reference in which to return a zero or non-zero value - * to indicate whether the device is on a multi GPU board - * - * @return - * - \ref NVML_SUCCESS if \a multiGpuBool has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard(nvmlDevice_t device, unsigned int *multiGpuBool); - -/** - * Retrieves the total ECC error counts for the device. - * - * For Fermi &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher. - * Requires ECC Mode to be enabled. 
- * - * The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of - * errors across the entire device. - * - * See \ref nvmlMemoryErrorType_t for a description of available error types.\n - * See \ref nvmlEccCounterType_t for a description of available counter types. - * - * @param device The identifier of the target device - * @param errorType Flag that specifies the type of the errors. - * @param counterType Flag that specifies the counter-type of the errors. - * @param eccCounts Reference in which to return the specified ECC errors - * - * @return - * - \ref NVML_SUCCESS if \a eccCounts has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceClearEccErrorCounts() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTotalEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, unsigned long long *eccCounts); - -/** - * Retrieves the detailed ECC error counts for the device. - * - * @deprecated This API supports only a fixed set of ECC error locations - * On different GPU architectures different locations are supported - * See \ref nvmlDeviceGetMemoryErrorCounter - * - * For Fermi &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based ECC counts. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other ECC counts. - * Requires ECC Mode to be enabled. - * - * Detailed errors provide separate ECC counts for specific parts of the memory system. - * - * Reports zero for unsupported ECC error counters when a subset of ECC error counters are supported. - * - * See \ref nvmlMemoryErrorType_t for a description of available bit types.\n - * See \ref nvmlEccCounterType_t for a description of available counter types.\n - * See \ref nvmlEccErrorCounts_t for a description of provided detailed ECC counts. - * - * @param device The identifier of the target device - * @param errorType Flag that specifies the type of the errors. - * @param counterType Flag that specifies the counter-type of the errors. - * @param eccCounts Reference in which to return the specified ECC errors - * - * @return - * - \ref NVML_SUCCESS if \a eccCounts has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceClearEccErrorCounts() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDetailedEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlEccErrorCounts_t *eccCounts); - -/** - * Retrieves the requested memory error counter for the device. - * - * For Fermi &tm; or newer fully supported devices. 
- * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
- * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
- *
- * Only applicable to devices with ECC.
- *
- * Requires ECC Mode to be enabled.
- *
- * See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
- * See \ref nvmlEccCounterType_t for a description of available counter types.\n
- * See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
- *
- * @param device The identifier of the target device
- * @param errorType Flag that specifies the type of error.
- * @param counterType Flag that specifies the counter-type of the errors.
- * @param locationType Specifies the location of the counter.
- * @param count Reference in which to return the ECC counter
- *
- * @return
- * - \ref NVML_SUCCESS if \a count has been populated
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType, \a counterType or \a locationType is
- * invalid, or \a count is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType,
- nvmlEccCounterType_t counterType,
- nvmlMemoryLocation_t locationType, unsigned long long *count);
-
-/**
- * Retrieves the current utilization rates for the device's major subsystems.
- *
- * For Fermi &tm; or newer fully supported devices.
- *
- * See \ref nvmlUtilization_t for details on available utilization rates.
- *
- * \note During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings.
- * This is caused by ECC Memory Scrubbing mechanism that is performed during driver initialization.
- *
- * @param device The identifier of the target device
- * @param utilization Reference in which to return the utilization information
- *
- * @return
- * - \ref NVML_SUCCESS if \a utilization has been populated
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a utilization is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization);
-
-/**
- * Retrieves the current utilization and sampling size in microseconds for the Encoder
- *
- * For Kepler &tm; or newer fully supported devices.
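
Utilization follows the same pattern in the Go bindings. A hedged sketch, assuming Device.GetUtilizationRates returns a Utilization struct whose Gpu and Memory fields are percentages over the sample period, matching the C nvmlUtilization_t:

package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// reportUtilization mirrors nvmlDeviceGetUtilizationRates; both fields are percentages.
func reportUtilization(dev nvml.Device) {
	if util, ret := dev.GetUtilizationRates(); ret == nvml.SUCCESS {
		fmt.Printf("GPU %d%%, memory %d%% utilized\n", util.Gpu, util.Memory)
	}
}
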
- *
- * @param device The identifier of the target device
- * @param utilization Reference to an unsigned int for encoder utilization info
- * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
- *
- * @return
- * - \ref NVML_SUCCESS if \a utilization has been populated
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs);
-
-/**
- * Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100.
- *
- * For Maxwell &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param encoderQueryType Type of encoder to query
- * @param encoderCapacity Reference to an unsigned int for the encoder capacity
- *
- * @return
- * - \ref NVML_SUCCESS if \a encoderCapacity is fetched
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a encoderCapacity is NULL, or \a device or \a encoderQueryType
- * are invalid
- * - \ref NVML_ERROR_NOT_SUPPORTED if device does not support the encoder specified in \a encoderQueryType
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetEncoderCapacity (nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int *encoderCapacity);
-
-/**
- * Retrieves the current encoder statistics for a given device.
- *
- * For Maxwell &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param sessionCount Reference to an unsigned int for count of active encoder sessions
- * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions
- * @param averageLatency Reference to an unsigned int for encode latency in microseconds
- *
- * @return
- * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency are fetched
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, or \a device or \a averageFps,
- * or \a averageLatency is NULL
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetEncoderStats (nvmlDevice_t device, unsigned int *sessionCount,
- unsigned int *averageFps, unsigned int *averageLatency);
-
-/**
- * Retrieves information about active encoder sessions on a target device.
- *
- * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The
- * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
- * written to the buffer.
- *
- * If the supplied buffer is not large enough to accommodate the active session array, the function returns
- * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
- * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
- * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
- *
- * For Maxwell &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
- * @param sessionInfos Reference in which to return the session information
- *
- * @return
- * - \ref NVML_SUCCESS if \a sessionInfos is fetched
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL.
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfos);
-
-/**
- * Retrieves the current utilization and sampling size in microseconds for the Decoder
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param utilization Reference to an unsigned int for decoder utilization info
- * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
- *
- * @return
- * - \ref NVML_SUCCESS if \a utilization has been populated
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs);
-
-/**
- * Retrieves the current and pending driver model for the device.
- *
- * For Fermi &tm; or newer fully supported devices.
- * For Windows only.
- *
- * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached
- * to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached.
- *
- * See \ref nvmlDriverModel_t for details on available driver models.
- * - * @param device The identifier of the target device - * @param current Reference in which to return the current driver model - * @param pending Reference in which to return the pending driver model - * - * @return - * - \ref NVML_SUCCESS if either \a current and/or \a pending have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or both \a current and \a pending are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetDriverModel() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); - -/** - * Get VBIOS version of the device. - * - * For all products. - * - * The VBIOS version may change from time to time. It will not exceed 32 characters in length - * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE. - * - * @param device The identifier of the target device - * @param version Reference to which to return the VBIOS version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char *version, unsigned int length); - -/** - * Get Bridge Chip Information for all the bridge chips on the board. - * - * For all fully supported products. - * Only applicable to multi-GPU products. - * - * @param device The identifier of the target device - * @param bridgeHierarchy Reference to the returned bridge chip Hierarchy - * - * @return - * - \ref NVML_SUCCESS if bridge chip exists - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a bridgeInfo is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if bridge chip not supported on the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t *bridgeHierarchy); - -/** - * Get information about processes with a compute context on a device - * - * For Fermi &tm; or newer fully supported devices. - * - * This function returns information only about compute running processes (e.g. CUDA application which have - * active context). Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by this function. - * - * To query the current number of running compute processes, call this function with *infoCount = 0. The - * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call - * \a infos is allowed to be NULL. - * - * The usedGpuMemory field returned is all of the memory used by the application. 
- * - * Keep in mind that information returned by this call is dynamic and the number of elements might change in - * time. Allocate more space for \a infos table in case new compute processes are spawned. - * - * @param device The identifier of the target device - * @param infoCount Reference in which to provide the \a infos array size, and - * to return the number of returned elements - * @param infos Reference in which to return the process information - * - * @return - * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small - * \a infoCount will contain minimal amount of space necessary for - * the call to complete - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see \ref nvmlSystemGetProcessName - */ -nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); - -/** - * Get information about processes with a graphics context on a device - * - * For Kepler &tm; or newer fully supported devices. - * - * This function returns information only about graphics based processes - * (eg. applications using OpenGL, DirectX) - * - * To query the current number of running graphics processes, call this function with *infoCount = 0. The - * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call - * \a infos is allowed to be NULL. - * - * The usedGpuMemory field returned is all of the memory used by the application. - * - * Keep in mind that information returned by this call is dynamic and the number of elements might change in - * time. Allocate more space for \a infos table in case new graphics processes are spawned. - * - * @param device The identifier of the target device - * @param infoCount Reference in which to provide the \a infos array size, and - * to return the number of returned elements - * @param infos Reference in which to return the process information - * - * @return - * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small - * \a infoCount will contain minimal amount of space necessary for - * the call to complete - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see \ref nvmlSystemGetProcessName - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); - -/** - * Check if the GPU devices are on the same physical board. - * - * For all fully supported products. - * - * @param device1 The first GPU device - * @param device2 The second GPU device - * @param onSameBoard Reference in which to return the status. - * Non-zero indicates that the GPUs are on the same board. 
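
The two-call size/fill pattern used by the process queries above is collapsed into a single slice-returning call in the Go bindings. A hedged sketch, assuming Device.GetComputeRunningProcesses returns a []ProcessInfo whose elements carry Pid and UsedGpuMemory fields:

package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// listComputeProcesses mirrors nvmlDeviceGetComputeRunningProcesses without the
// caller-sized buffer dance: the binding sizes and fills the slice itself.
func listComputeProcesses(dev nvml.Device) {
	procs, ret := dev.GetComputeRunningProcesses()
	if ret != nvml.SUCCESS {
		fmt.Printf("query failed: %v\n", nvml.ErrorString(ret))
		return
	}
	for _, p := range procs {
		fmt.Printf("pid %d uses %d bytes of GPU memory\n", p.Pid, p.UsedGpuMemory)
	}
}
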
- *
- * @return
- * - \ref NVML_SUCCESS if \a onSameBoard has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a dev1 or \a dev2 are invalid or \a onSameBoard is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device
- * - \ref NVML_ERROR_GPU_IS_LOST if either GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int *onSameBoard);
-
-/**
- * Retrieves the root/admin permissions on the target API. See \a nvmlRestrictedAPI_t for the list of supported APIs.
- * If an API is restricted only root users can call that API. See \a nvmlDeviceSetAPIRestriction to change current permissions.
- *
- * For all fully supported products.
- *
- * @param device The identifier of the target device
- * @param apiType Target API type for this operation
- * @param isRestricted Reference in which to return the current restriction
- * NVML_FEATURE_ENABLED indicates that the API is root-only
- * NVML_FEATURE_DISABLED indicates that the API is accessible to all users
- *
- * @return
- * - \ref NVML_SUCCESS if \a isRestricted has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a apiType incorrect or \a isRestricted is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device or the device does not support
- * the feature that is being queried (E.G. Enabling/disabling Auto Boosted clocks is
- * not supported by the device)
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- * @see nvmlRestrictedAPI_t
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t *isRestricted);
-
-/**
- * Gets recent samples for the GPU.
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * Based on type, this method can be used to fetch the power, utilization or clock samples maintained in the buffer by
- * the driver.
- *
- * Power, Utilization and Clock samples are returned as type "unsigned int" for the union nvmlValue_t.
- *
- * To get the size of samples that user needs to allocate, the method is invoked with samples set to NULL.
- * The returned samplesCount will provide the number of samples that can be queried. The user needs to
- * allocate the buffer with size as samplesCount * sizeof(nvmlSample_t).
- *
- * lastSeenTimeStamp represents CPU timestamp in microseconds. Set it to 0 to fetch all the samples maintained by the
- * underlying buffer. Set lastSeenTimeStamp to one of the timeStamps retrieved from the date of the previous query
- * to get more recent samples.
- *
- * This method fetches the number of entries which can be accommodated in the provided samples array, and the
- * reference samplesCount is updated to indicate how many samples were actually retrieved. The advantage of using this
- * method for samples in contrast to polling via existing methods is to get higher frequency data at lower polling cost.
- *
- * @param device The identifier for the target device
- * @param type Type of sampling event
- * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp.
- * @param sampleValType Output parameter to represent the type of sample value as described in nvmlSampleVal_t
- * @param sampleCount Reference to provide the number of elements which can be queried in samples array
- * @param samples Reference in which samples are returned
-
- * @return
- * - \ref NVML_SUCCESS if samples are successfully retrieved
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a samplesCount is NULL or
- * reference to \a sampleCount is 0 for non null \a samples
- * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp,
- nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples);
-
-/**
- * Gets Total, Available and Used size of BAR1 memory.
- *
- * BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party
- * devices (peer-to-peer on the PCIE bus).
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param bar1Memory Reference in which BAR1 memory
- * information is returned.
- *
- * @return
- * - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory);
-
-
-/**
- * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power
- * or thermal constraints.
- *
- * The method is important to users who are trying to understand if their GPUs throttle at any point during their applications. The
- * difference in violation times at two different reference times gives an indication of a GPU throttling event.
- *
- * Violation for thermal capping is not supported at this time.
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param perfPolicyType Represents Performance policy which can trigger GPU throttling
- * @param violTime Reference to which violation time related information is returned
- *
- *
- * @return
- * - \ref NVML_SUCCESS if violation time is successfully retrieved
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime);
-
-/**
- * @}
- */
-
-/** @addtogroup nvmlAccountingStats
- * @{
- */
-
-/**
- * Queries the state of per process accounting mode.
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * See \ref nvmlDeviceGetAccountingStats for more details.
- * See \ref nvmlDeviceSetAccountingMode
- *
- * @param device The identifier of the target device
- * @param mode Reference in which to return the current accounting mode
- *
- * @return
- * - \ref NVML_SUCCESS if the mode has been successfully retrieved
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t *mode);
-
-/**
- * Queries process's accounting stats.
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * Accounting stats capture GPU utilization and other statistics across the lifetime of a process.
- * Accounting stats can be queried during the lifetime of the process and after its termination.
- * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and
- * updated to actual running time after its termination.
- * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old
- * processes.
- *
- * See \ref nvmlAccountingStats_t for description of each returned metric.
- * List of processes that can be queried can be retrieved from \ref nvmlDeviceGetAccountingPids.
- *
- * @note Accounting Mode needs to be on. See \ref nvmlDeviceGetAccountingMode.
- * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be
- * queried since they don't contribute to GPU utilization.
- * @note In case of pid collision stats of only the latest process (that terminated last) will be reported
- *
- * @warning On Kepler devices per process statistics are accurate only if there's one process running on a GPU.
- * - * @param device The identifier of the target device - * @param pid Process Id of the target process to query stats for - * @param stats Reference in which to return the process's accounting stats - * - * @return - * - \ref NVML_SUCCESS if stats have been successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a stats are NULL - * - \ref NVML_ERROR_NOT_FOUND if process stats were not found - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingBufferSize - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t *stats); - -/** - * Queries list of processes that can be queried for accounting stats. The list of processes returned - * can be in running or terminated state. - * - * For Kepler &tm; or newer fully supported devices. - * - * To just query the number of processes ready to be queried, call this function with *count = 0 and - * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. - * - * For more details see \ref nvmlDeviceGetAccountingStats. - * - * @note In case of PID collision some processes might not be accessible before the circular buffer is full. - * - * @param device The identifier of the target device - * @param count Reference in which to provide the \a pids array size, and - * to return the number of elements ready to be queried - * @param pids Reference in which to return list of process ids - * - * @return - * - \ref NVML_SUCCESS if pids were successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to - * expected value) - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingBufferSize - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int *count, unsigned int *pids); - -/** - * Returns the number of processes that the circular buffer with accounting pids can hold. - * - * For Kepler &tm; or newer fully supported devices. - * - * This is the maximum number of processes that accounting information will be stored for before information - * about oldest processes will get overwritten by information about new processes. - * - * @param device The identifier of the target device - * @param bufferSize Reference in which to provide the size (in number of elements) - * of the circular buffer for accounting stats. 
- * - * @return - * - \ref NVML_SUCCESS if buffer size was successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a bufferSize is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingStats - * @see nvmlDeviceGetAccountingPids - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int *bufferSize); - -/** @} */ - -/** @addtogroup nvmlDeviceQueries - * @{ - */ - -/** - * Returns the list of retired pages by source, including pages that are pending retirement - * The address information provided from this API is the hardware address of the page that was retired. Note - * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param cause Filter page addresses by cause of retirement - * @param pageCount Reference in which to provide the \a addresses buffer size, and - * to return the number of retired pages that match \a cause - * Set to 0 to query the size without allocating an \a addresses buffer - * @param addresses Buffer to write the page addresses into - * - * @return - * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the - * matching page addresses. \a pageCount is set to the needed size. - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or - * \a addresses is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause, - unsigned int *pageCount, unsigned long long *addresses); - -/** - * Check if any pages are pending retirement and need a reboot to fully retire. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param isPending Reference in which to return the pending status - * - * @return - * - \ref NVML_SUCCESS if \a isPending was populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t *isPending); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlUnitCommands Unit Commands - * This chapter describes NVML operations that change the state of the unit. For S-class products. - * Each of these requires root/admin access. 
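
/*
 * Editor's sketch (not part of the original nvml.h): sizing and fetching the retired-page
 * list documented just above, then checking whether retirements are pending a reboot. The
 * cause constant is one nvmlPageRetirementCause_t value (double-bit ECC) chosen purely as an
 * example. Assumes NVML is initialized and `device` is a valid handle.
 */
#include <nvml.h>
#include <stdio.h>
#include <stdlib.h>

static void report_retired_pages(nvmlDevice_t device)
{
    nvmlPageRetirementCause_t cause = NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR;
    unsigned int pageCount = 0;                       /* 0 = only ask for the required size */
    nvmlReturn_t ret = nvmlDeviceGetRetiredPages(device, cause, &pageCount, NULL);

    if (ret == NVML_ERROR_INSUFFICIENT_SIZE && pageCount > 0) {
        unsigned long long *addresses = malloc(pageCount * sizeof(*addresses));
        if (addresses &&
            nvmlDeviceGetRetiredPages(device, cause, &pageCount, addresses) == NVML_SUCCESS) {
            for (unsigned int i = 0; i < pageCount; i++)
                printf("retired page at 0x%llx\n", addresses[i]);
        }
        free(addresses);
    }

    nvmlEnableState_t isPending;
    if (nvmlDeviceGetRetiredPagesPendingStatus(device, &isPending) == NVML_SUCCESS &&
        isPending == NVML_FEATURE_ENABLED)
        printf("pages pending retirement: reboot required\n");
}
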
Non-admin users will see an NVML_ERROR_NO_PERMISSION - * error code when invoking any of these methods. - * @{ - */ -/***************************************************************************************************/ - -/** - * Set the LED state for the unit. The LED can be either green (0) or amber (1). - * - * For S-class products. - * Requires root/admin permissions. - * - * This operation takes effect immediately. - * - * - * Current S-Class products don't provide unique LEDs for each unit. As such, both front - * and back LEDs will be toggled in unison regardless of which unit is specified with this command. - * - * See \ref nvmlLedColor_t for available colors. - * - * @param unit The identifier of the target unit - * @param color The target LED color - * - * @return - * - \ref NVML_SUCCESS if the LED color has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a color is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlUnitGetLedState() - */ -nvmlReturn_t DECLDIR nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceCommands Device Commands - * This chapter describes NVML operations that change the state of the device. - * Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION - * error code when invoking any of these methods. - * @{ - */ -/***************************************************************************************************/ - -/** - * Set the persistence mode for the device. - * - * For all products. - * For Linux only. - * Requires root/admin permissions. - * - * The persistence mode determines whether the GPU driver software is torn down after the last client - * exits. - * - * This operation takes effect immediately. It is not persistent across reboots. After each reboot the - * persistence mode is reset to "Disabled". - * - * See \ref nvmlEnableState_t for available modes. - * - * @param device The identifier of the target device - * @param mode The target persistence mode - * - * @return - * - \ref NVML_SUCCESS if the persistence mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetPersistenceMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode); - -/** - * Set the compute mode for the device. - * - * For all products. - * Requires root/admin permissions. - * - * The compute mode determines whether a GPU can be used for compute operations and whether it can - * be shared across contexts. - * - * This operation takes effect immediately. Under Linux it is not persistent across reboots and - * always resets to "Default". 
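
/*
 * Editor's sketch (not part of the original nvml.h): enabling persistence mode as documented
 * above. Linux only, requires root/admin, and the setting is lost on reboot, so callers
 * typically re-apply it at boot. Assumes NVML is initialized and `device` is a valid handle.
 */
#include <nvml.h>

static nvmlReturn_t enable_persistence(nvmlDevice_t device)
{
    nvmlReturn_t ret = nvmlDeviceSetPersistenceMode(device, NVML_FEATURE_ENABLED);
    /* NVML_ERROR_NO_PERMISSION here usually just means the caller is not root/admin. */
    return ret;
}
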
Under windows it is persistent. - * - * Under windows compute mode may only be set to DEFAULT when running in WDDM - * - * See \ref nvmlComputeMode_t for details on available compute modes. - * - * @param device The identifier of the target device - * @param mode The target compute mode - * - * @return - * - \ref NVML_SUCCESS if the compute mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetComputeMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode); - -/** - * Set the ECC mode for the device. - * - * For Kepler &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher. - * Requires root/admin permissions. - * - * The ECC mode determines whether the GPU enables its ECC support. - * - * This operation takes effect after the next reboot. - * - * See \ref nvmlEnableState_t for details on available modes. - * - * @param device The identifier of the target device - * @param ecc The target ECC mode - * - * @return - * - \ref NVML_SUCCESS if the ECC mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetEccMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc); - -/** - * Clear the ECC error and other memory error counts for the device. - * - * For Kepler &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts. - * Requires root/admin permissions. - * Requires ECC Mode to be enabled. - * - * Sets all of the specified ECC counters to 0, including both detailed and total counts. - * - * This operation takes effect immediately. - * - * See \ref nvmlMemoryErrorType_t for details on available counter types. - * - * @param device The identifier of the target device - * @param counterType Flag that indicates which type of errors should be cleared. 
- * - * @return - * - \ref NVML_SUCCESS if the error counts were cleared - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see - * - nvmlDeviceGetDetailedEccErrors() - * - nvmlDeviceGetTotalEccErrors() - */ -nvmlReturn_t DECLDIR nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType); - -/** - * Set the driver model for the device. - * - * For Fermi &tm; or newer fully supported devices. - * For windows only. - * Requires root/admin permissions. - * - * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached - * to the device it must run in WDDM mode. - * - * It is possible to force the change to WDM (TCC) while the display is still attached with a force flag (nvmlFlagForce). - * This should only be done if the host is subsequently powered down and the display is detached from the device - * before the next reboot. - * - * This operation takes effect after the next reboot. - * - * Windows driver model may only be set to WDDM when running in DEFAULT compute mode. - * - * Change driver model to WDDM is not supported when GPU doesn't support graphics acceleration or - * will not support it after reboot. See \ref nvmlDeviceSetGpuOperationMode. - * - * See \ref nvmlDriverModel_t for details on available driver models. - * See \ref nvmlFlagDefault and \ref nvmlFlagForce - * - * @param device The identifier of the target device - * @param driverModel The target driver model - * @param flags Flags that change the default behavior - * - * @return - * - \ref NVML_SUCCESS if the driver model has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a driverModel is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows or the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetDriverModel() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags); - -/** - * Set clocks that applications will lock to. - * - * Sets the clocks that compute and graphics applications will be running at. - * e.g. CUDA driver requests these clocks during context creation which means this property - * defines clocks at which CUDA applications will be running unless some overspec event - * occurs (e.g. over power, over thermal or external HW brake). - * - * Can be used as a setting to request constant performance. - * - * On Pascal and newer hardware, this will automatically disable automatic boosting of clocks. 
- * - * On K80 and newer Kepler and Maxwell GPUs, users desiring fixed performance should also call - * \ref nvmlDeviceSetAutoBoostedClocksEnabled to prevent clocks from automatically boosting - * above the clock value being set. - * - * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetSupportedMemoryClocks and \ref nvmlDeviceGetSupportedGraphicsClocks - * for details on how to list available clocks combinations. - * - * After system reboot or driver reload applications clocks go back to their default value. - * See \ref nvmlDeviceResetApplicationsClocks. - * - * @param device The identifier of the target device - * @param memClockMHz Requested memory clock in MHz - * @param graphicsClockMHz Requested graphics clock in MHz - * - * @return - * - \ref NVML_SUCCESS if new settings were successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memClockMHz and \a graphicsClockMHz - * is not a valid clock combination - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int memClockMHz, unsigned int graphicsClockMHz); - -/** - * Set new power limit of this device. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. - * - * \note Limit is not persistent across reboots or driver unloads. - * Enable persistent mode to prevent driver from unloading when no application is using the device. - * - * @param device The identifier of the target device - * @param limit Power management limit in milliwatts to set - * - * @return - * - \ref NVML_SUCCESS if \a limit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is out of range - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetPowerManagementLimitConstraints - * @see nvmlDeviceGetPowerManagementDefaultLimit - */ -nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit); - -/** - * Sets new GOM. See \a nvmlGpuOperationMode_t for details. - * - * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. - * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. - * Not supported on Quadro ® and Tesla &tm; C-class products. - * Requires root/admin permissions. - * - * Changing GOMs requires a reboot. - * The reboot requirement might be removed in the future. - * - * Compute only GOMs don't support graphics acceleration. Under windows switching to these GOMs when - * pending driver model is WDDM is not supported. See \ref nvmlDeviceSetDriverModel. 
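
/*
 * Editor's sketch (not part of the original nvml.h): pinning application clocks and a power
 * cap using the two setters documented above. The 2505/875 MHz pair and the 200 W cap are
 * placeholder values only; real callers should take them from
 * nvmlDeviceGetSupportedMemoryClocks / nvmlDeviceGetSupportedGraphicsClocks and
 * nvmlDeviceGetPowerManagementLimitConstraints. Root/admin required; neither setting
 * survives a reboot or driver reload.
 */
#include <nvml.h>

static void pin_performance(nvmlDevice_t device)
{
    /* The (mem, graphics) pair must be one of the supported clock combinations. */
    nvmlReturn_t ret = nvmlDeviceSetApplicationsClocks(device, 2505u, 875u);
    if (ret != NVML_SUCCESS)
        return;

    /* Power limit is specified in milliwatts: 200 W -> 200000 mW. */
    (void)nvmlDeviceSetPowerManagementLimit(device, 200000u);
}
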
- * - * @param device The identifier of the target device - * @param mode Target GOM - * - * @return - * - \ref NVML_SUCCESS if \a mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode incorrect - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support GOM or specific mode - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlGpuOperationMode_t - * @see nvmlDeviceGetGpuOperationMode - */ -nvmlReturn_t DECLDIR nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode); - -/** - * Changes the root/admin restructions on certain APIs. See \a nvmlRestrictedAPI_t for the list of supported APIs. - * This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs. - * The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See \a nvmlDeviceGetAPIRestriction - * to query the current restriction settings. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * @param device The identifier of the target device - * @param apiType Target API type for this operation - * @param isRestricted The target restriction - * - * @return - * - \ref NVML_SUCCESS if \a isRestricted has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a apiType incorrect - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support changing API restrictions or the device does not support - * the feature that api restrictions are being set for (E.G. Enabling/disabling auto - * boosted clocks is not supported by the device) - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlRestrictedAPI_t - */ -nvmlReturn_t DECLDIR nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted); - -/** - * @} - */ - -/** @addtogroup nvmlAccountingStats - * @{ - */ - -/** - * Enables or disables per process accounting. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * @note This setting is not persistent and will default to disabled after driver unloads. - * Enable persistence mode to be sure the setting doesn't switch off to disabled. - * - * @note Enabling accounting mode has no negative impact on the GPU performance. - * - * @note Disabling accounting clears all accounting pids information. 
- * - * See \ref nvmlDeviceGetAccountingMode - * See \ref nvmlDeviceGetAccountingStats - * See \ref nvmlDeviceClearAccountingPids - * - * @param device The identifier of the target device - * @param mode The target accounting mode - * - * @return - * - \ref NVML_SUCCESS if the new mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode); - -/** - * Clears accounting information about all processes that have already terminated. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetAccountingMode - * See \ref nvmlDeviceGetAccountingStats - * See \ref nvmlDeviceSetAccountingMode - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if accounting information has been cleared - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceClearAccountingPids(nvmlDevice_t device); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup NvLink NvLink Methods - * This chapter describes methods that NVML can perform on NVLINK enabled devices. - * @{ - */ -/***************************************************************************************************/ - -/** - * Retrieves the state of the device's NvLink for the link specified - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param isActive \a nvmlEnableState_t where NVML_FEATURE_ENABLED indicates that - * the link is active and NVML_FEATURE_DISABLED indicates it - * is inactive - * - * @return - * - \ref NVML_SUCCESS if \a isActive has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a isActive is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t *isActive); - -/** - * Retrieves the version of the device's NvLink for the link specified - * - * For Pascal &tm; or newer fully supported devices. 
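
/*
 * Editor's sketch (not part of the original nvml.h): combining the two accounting setters
 * documented in the group above — enable per-process accounting, then drop stats left over
 * from already-terminated processes. Requires root/admin; the mode does not persist across
 * driver unloads. Assumes NVML is initialized and `device` is a valid handle.
 */
#include <nvml.h>

static nvmlReturn_t start_accounting_fresh(nvmlDevice_t device)
{
    nvmlReturn_t ret = nvmlDeviceSetAccountingMode(device, NVML_FEATURE_ENABLED);
    if (ret != NVML_SUCCESS)
        return ret;
    return nvmlDeviceClearAccountingPids(device);
}
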
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param version Requested NvLink version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a version is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int *version); - -/** - * Retrieves the requested capability from the device's NvLink for the link specified - * Please refer to the \a nvmlNvLinkCapability_t structure for the specific caps that can be queried - * The return value should be treated as a boolean. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param capability Specifies the \a nvmlNvLinkCapability_t to be queried - * @param capResult A boolean for the queried capability indicating that feature is available - * - * @return - * - \ref NVML_SUCCESS if \a capResult has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a capability is invalid or \a capResult is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, - nvmlNvLinkCapability_t capability, unsigned int *capResult); - -/** - * Retrieves the PCI information for the remote node on a NvLink link - * Note: pciSubSystemId is not filled in this function and is indeterminate - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param pci \a nvmlPciInfo_t of the remote node for the specified link - * - * @return - * - \ref NVML_SUCCESS if \a pci has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a pci is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci); - -/** - * Retrieves the specified error counter value - * Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available - * - * For Pascal &tm; or newer fully supported devices. 
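
/*
 * Editor's sketch (not part of the original nvml.h): walking a device's NvLink links and
 * reporting state and version with the queries documented above. NVML_NVLINK_MAX_LINKS is
 * the per-device link bound defined elsewhere in this header. Assumes NVML is initialized
 * and `device` is a Pascal-or-newer handle.
 */
#include <nvml.h>
#include <stdio.h>

static void dump_nvlink_links(nvmlDevice_t device)
{
    for (unsigned int link = 0; link < NVML_NVLINK_MAX_LINKS; link++) {
        nvmlEnableState_t isActive;
        if (nvmlDeviceGetNvLinkState(device, link, &isActive) != NVML_SUCCESS)
            continue;                        /* link not present or not supported */

        unsigned int version = 0;
        (void)nvmlDeviceGetNvLinkVersion(device, link, &version);
        printf("link %u: %s, version %u\n", link,
               isActive == NVML_FEATURE_ENABLED ? "active" : "inactive", version);
    }
}
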
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the NvLink counter to be queried - * @param counterValue Returned counter value - * - * @return - * - \ref NVML_SUCCESS if \a counter has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid or \a counterValue is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, - nvmlNvLinkErrorCounter_t counter, unsigned long long *counterValue); - -/** - * Resets all error counters to zero - * Please refer to \a nvmlNvLinkErrorCounter_t for the list of error counters that are reset - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * - * @return - * - \ref NVML_SUCCESS if the reset is successful - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link); - -/** - * Set the NVLINK utilization counter control information for the specified counter, 0 or 1. - * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition. Performs a reset - * of the counters if the reset parameter is non-zero. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param counter Specifies the counter that should be set (0 or 1). - * @param link Specifies the NvLink link to be queried - * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to set - * @param reset Resets the counters on set if non-zero - * - * @return - * - \ref NVML_SUCCESS if the control has been set successfully - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, - nvmlNvLinkUtilizationControl_t *control, unsigned int reset); - -/** - * Get the NVLINK utilization counter control information for the specified counter, 0 or 1. - * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param counter Specifies the counter that should be set (0 or 1). 
- * @param link Specifies the NvLink link to be queried - * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to place information - * - * @return - * - \ref NVML_SUCCESS if the control has been set successfully - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, - nvmlNvLinkUtilizationControl_t *control); - - -/** - * Retrieve the NVLINK utilization counter based on the current control for a specified counter. - * In general it is good practice to use \a nvmlDeviceSetNvLinkUtilizationControl - * before reading the utilization counters as they have no default state - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the counter that should be read (0 or 1). - * @param rxcounter Receive counter return value - * @param txcounter Transmit counter return value - * - * @return - * - \ref NVML_SUCCESS if \a rxcounter and \a txcounter have been successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, or \a link is invalid or \a rxcounter or \a txcounter are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationCounter(nvmlDevice_t device, unsigned int link, unsigned int counter, - unsigned long long *rxcounter, unsigned long long *txcounter); - -/** - * Freeze the NVLINK utilization counters - * Both the receive and transmit counters are operated on by this function - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the counter that should be frozen (0 or 1). - * @param freeze NVML_FEATURE_ENABLED = freeze the receive and transmit counters - * NVML_FEATURE_DISABLED = unfreeze the receive and transmit counters - * - * @return - * - \ref NVML_SUCCESS if counters were successfully frozen or unfrozen - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, \a counter, or \a freeze is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceFreezeNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, - unsigned int counter, nvmlEnableState_t freeze); - -/** - * Reset the NVLINK utilization counters - * Both the receive and transmit counters are operated on by this function - * - * For Pascal &tm; or newer fully supported devices. 
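
/*
 * Editor's sketch (not part of the original nvml.h): programming NvLink utilization counter 0
 * and reading it back, following the control/read sequence documented above. The control
 * structure is left zero-initialized here; real callers fill it in per the
 * nvmlNvLinkUtilizationControl_t definition. Assumes NVML is initialized, `device` is valid,
 * and `link` is an active link index.
 */
#include <nvml.h>
#include <stdio.h>
#include <string.h>

static void sample_nvlink_counter(nvmlDevice_t device, unsigned int link)
{
    nvmlNvLinkUtilizationControl_t control;
    memset(&control, 0, sizeof(control));

    /* Program counter 0 and reset it so the next read starts from zero. */
    if (nvmlDeviceSetNvLinkUtilizationControl(device, link, 0, &control, 1) != NVML_SUCCESS)
        return;

    unsigned long long rx = 0, tx = 0;
    if (nvmlDeviceGetNvLinkUtilizationCounter(device, link, 0, &rx, &tx) == NVML_SUCCESS)
        printf("link %u counter 0: rx=%llu tx=%llu\n", link, rx, tx);
}
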
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be reset - * @param counter Specifies the counter that should be reset (0 or 1) - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, unsigned int counter); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlEvents Event Handling Methods - * This chapter describes methods that NVML can perform against each device to register and wait for - * some event to occur. - * @{ - */ -/***************************************************************************************************/ - -/** - * Create an empty set of events. - * Event set should be freed by \ref nvmlEventSetFree - * - * For Fermi &tm; or newer fully supported devices. - * @param set Reference in which to return the event handle - * - * @return - * - \ref NVML_SUCCESS if the event has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a set is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventSetFree - */ -nvmlReturn_t DECLDIR nvmlEventSetCreate(nvmlEventSet_t *set); - -/** - * Starts recording of events on a specified devices and add the events to specified \ref nvmlEventSet_t - * - * For Fermi &tm; or newer fully supported devices. - * Ecc events are available only on ECC enabled devices (see \ref nvmlDeviceGetTotalEccErrors) - * Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode) - * - * For Linux only. - * - * \b IMPORTANT: Operations on \a set are not thread safe - * - * This call starts recording of events on specific device. - * All events that occurred before this call are not recorded. - * Checking if some event occurred can be done with \ref nvmlEventSetWait - * - * If function reports NVML_ERROR_UNKNOWN, event set is in undefined state and should be freed. - * If function reports NVML_ERROR_NOT_SUPPORTED, event set can still be used. None of the requested eventTypes - * are registered in that case. 
- * - * @param device The identifier of the target device - * @param eventTypes Bitmask of \ref nvmlEventType to record - * @param set Set to which add new event types - * - * @return - * - \ref NVML_SUCCESS if the event has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform does not support this feature or some of requested event types - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceGetSupportedEventTypes - * @see nvmlEventSetWait - * @see nvmlEventSetFree - */ -nvmlReturn_t DECLDIR nvmlDeviceRegisterEvents(nvmlDevice_t device, unsigned long long eventTypes, nvmlEventSet_t set); - -/** - * Returns information about events supported on device - * - * For Fermi &tm; or newer fully supported devices. - * - * Events are not supported on Windows. So this function returns an empty mask in \a eventTypes on Windows. - * - * @param device The identifier of the target device - * @param eventTypes Reference in which to return bitmask of supported events - * - * @return - * - \ref NVML_SUCCESS if the eventTypes has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventType is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes(nvmlDevice_t device, unsigned long long *eventTypes); - -/** - * Waits on events and delivers events - * - * For Fermi &tm; or newer fully supported devices. - * - * If some events are ready to be delivered at the time of the call, function returns immediately. - * If there are no events ready to be delivered, function sleeps till event arrives - * but not longer than specified timeout. This function in certain conditions can return before - * specified timeout passes (e.g. when interrupt arrives) - * - * In case of xid error, the function returns the most recent xid error type seen by the system. If there are multiple - * xid errors generated before nvmlEventSetWait is invoked then the last seen xid error type is returned for all - * xid error events. - * - * @param set Reference to set of events to wait on - * @param data Reference in which to return event data - * @param timeoutms Maximum amount of wait time in milliseconds for registered event - * - * @return - * - \ref NVML_SUCCESS if the data has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a data is NULL - * - \ref NVML_ERROR_TIMEOUT if no event arrived in specified timeout or interrupt arrived - * - \ref NVML_ERROR_GPU_IS_LOST if a GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlEventSetWait(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); - -/** - * Releases events in the set - * - * For Fermi &tm; or newer fully supported devices. 
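
/*
 * Editor's sketch (not part of the original nvml.h): the register-and-wait loop described
 * above — create a set, register whatever event types the device supports, then block with a
 * timeout. Linux only; operations on an event set are not thread safe. Assumes NVML is
 * initialized and `device` is a valid handle.
 */
#include <nvml.h>
#include <stdio.h>

static void watch_events(nvmlDevice_t device, unsigned int iterations)
{
    nvmlEventSet_t set;
    if (nvmlEventSetCreate(&set) != NVML_SUCCESS)
        return;

    unsigned long long types = 0;
    if (nvmlDeviceGetSupportedEventTypes(device, &types) == NVML_SUCCESS && types != 0 &&
        nvmlDeviceRegisterEvents(device, types, set) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < iterations; i++) {
            nvmlEventData_t data;
            nvmlReturn_t ret = nvmlEventSetWait(set, &data, 5000 /* ms */);
            if (ret == NVML_SUCCESS)
                printf("event 0x%llx delivered\n", data.eventType);
            else if (ret != NVML_ERROR_TIMEOUT)
                break;                       /* unexpected error: stop polling */
        }
    }
    nvmlEventSetFree(set);
}
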
- * - * @param set Reference to events to be released - * - * @return - * - \ref NVML_SUCCESS if the event has been successfully released - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlEventSetFree(nvmlEventSet_t set); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlZPI Drain states - * This chapter describes methods that NVML can perform against each device to control their drain state - * and recognition by NVML and NVIDIA kernel driver. These methods can be used with out-of-band tools to - * power on/off GPUs, enable robust reset scenarios, etc. - * @{ - */ -/***************************************************************************************************/ - -/** - * Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. - * Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before - * this call is made. - * Must be called as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU drain state to be modified - * @param newState The drain state that should be entered, see \ref nvmlEnableState_t - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a newState is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation - * - \ref NVML_ERROR_IN_USE if the device has persistence mode turned on - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceModifyDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t newState); - -/** - * Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining - * state. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU drain state to be queried - * @param currentState The current drain state for this GPU, see \ref nvmlEnableState_t - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a currentState is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceQueryDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t *currentState); - -/** - * This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver - * as long as no other processes are attached. If other processes are attached, this call will return - * NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. 
Note: the - * only situation where a process can still be attached after nvmlDeviceModifyDrainState() is called - * to initiate the draining state is if that process was using, and is still using, a GPU before the - * call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled - * prior to this call. - * - * For long-running NVML processes please note that this will change the enumeration of current GPUs. - * For example, if there are four GPUs present and GPU1 is removed, the new enumeration will be 0-2. - * Also, device handles after the removed GPU will not be valid and must be re-established. - * Must be run as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU to be removed - * @param gpuState Whether the GPU is to be removed, from the OS - * see \ref nvmlDetachGpuState_t - * @param linkState Requested upstream PCIe link state, see \ref nvmlPcieLinkState_t - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_IN_USE if the device is still in use and cannot be removed - */ -nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu (nvmlPciInfo_t *pciInfo, nvmlDetachGpuState_t gpuState, nvmlPcieLinkState_t linkState); - -/** - * Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that - * were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. - * If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes - * the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order. - * - * In addition, all newly discovered GPUs will be initialized and their ECC scrubbed which may take several seconds - * per GPU. Also, all device handles are no longer guaranteed to be valid post discovery. - * - * Must be run as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI tree to be searched. Only the domain, bus, and device - * fields are used in this call. 
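
/*
 * Editor's sketch (not part of the original nvml.h): initiating a drain and confirming it via
 * the two calls documented above. `pci` is the target GPU's PCI address, filled in beforehand
 * from the device's PCI-info query; persistence mode must already be disabled and the caller
 * must be root. Linux only.
 */
#include <nvml.h>
#include <stdio.h>

static void drain_gpu(nvmlPciInfo_t *pci)
{
    if (nvmlDeviceModifyDrainState(pci, NVML_FEATURE_ENABLED) != NVML_SUCCESS)
        return;                              /* e.g. NVML_ERROR_IN_USE: persistence mode still on */

    nvmlEnableState_t state;
    if (nvmlDeviceQueryDrainState(pci, &state) == NVML_SUCCESS &&
        state == NVML_FEATURE_ENABLED)
        printf("GPU %04x:%02x:%02x is draining\n", pci->domain, pci->bus, pci->device);
}
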
- * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the operating system does not support this feature - * - \ref NVML_ERROR_OPERATING_SYSTEM if the operating system is denying this feature - * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceDiscoverGpus (nvmlPciInfo_t *pciInfo); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlFieldValueQueries Field Value Queries - * This chapter describes NVML operations that are associated with retrieving Field Values from NVML - * @{ - */ -/***************************************************************************************************/ - -/** - * Request values for a list of fields for a device. This API allows multiple fields to be queried at once. - * If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs - * will be populated from a single call rather than making a driver call for each fieldId. - * - * @param device The device handle of the GPU to request field values for - * @param valuesCount Number of entries in values that should be retrieved - * @param values Array of \a valuesCount structures to hold field values. - * Each value's fieldId must be populated prior to this call - * - * @return - * - \ref NVML_SUCCESS if any values in \a values were populated. Note that you must - * check the nvmlReturn field of each value for each individual - * status - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL - */ -nvmlReturn_t DECLDIR nvmlDeviceGetFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); - - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlGridQueries Grid Queries - * This chapter describes NVML operations that are associated with NVIDIA GRID products. - * @{ - */ -/***************************************************************************************************/ - -/** - * This method is used to get the virtualization mode corresponding to the GPU. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device Identifier of the target device - * @param pVirtualMode Reference to virtualization mode. One of NVML_GPU_VIRTUALIZATION_? - * - * @return - * - \ref NVML_SUCCESS if \a pVirtualMode is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t *pVirtualMode); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlGridCommands Grid Commands - * This chapter describes NVML operations that are associated with NVIDIA GRID products. 
- * @{ - */ -/***************************************************************************************************/ - -/** - * This method is used to set the virtualization mode corresponding to the GPU. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device Identifier of the target device - * @param virtualMode virtualization mode. One of NVML_GPU_VIRTUALIZATION_? - * - * @return - * - \ref NVML_SUCCESS if \a pVirtualMode is set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported. - * - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client. - */ -nvmlReturn_t DECLDIR nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpu vGPU Management - * @{ - * - * Set of APIs supporting GRID vGPU - */ -/***************************************************************************************************/ - -/** - * Retrieve the supported vGPU types on a physical GPU (device). - * - * An array of supported vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer - * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount - * is used to return the number of vGPU types written to the buffer. - * - * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. - * To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0. - * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported. - * - * @param device The identifier of the target device - * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types - * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL or \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_VGPU_ECC_NOT_SUPPORTED if ECC is enabled on the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); - -/** - * Retrieve the currently creatable vGPU types on a physical GPU (device). - * - * An array of creatable vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer - * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount - * is used to return the number of vGPU types written to the buffer. 
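
/*
 * Editor's sketch (not part of the original nvml.h): enumerating the vGPU types a physical
 * GPU supports, using the probe-then-allocate pattern documented above (the creatable-types
 * query below follows the same shape). Assumes NVML is initialized and `device` is valid.
 */
#include <nvml.h>
#include <stdio.h>
#include <stdlib.h>

static void list_supported_vgpu_types(nvmlDevice_t device)
{
    unsigned int count = 0;
    nvmlReturn_t ret = nvmlDeviceGetSupportedVgpus(device, &count, NULL);
    if (ret != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return;                               /* no vGPU support, or nothing to report */

    nvmlVgpuTypeId_t *ids = malloc(count * sizeof(*ids));
    if (ids && nvmlDeviceGetSupportedVgpus(device, &count, ids) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++)
            printf("supported vGPU type id: %u\n", (unsigned int)ids[i]);
    }
    free(ids);
}
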
- * - * The creatable vGPU types for a device may differ over time, as there may be restrictions on what type of vGPU types - * can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable - * list will be restricted to whatever vGPU type is already running on the device. - * - * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. - * To query the number of vGPU types createable for the GPU, call this function with *vgpuCount = 0. - * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable. - * - * @param device The identifier of the target device - * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types - * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_VGPU_ECC_NOT_SUPPORTED if ECC is enabled on the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); - -/** - * Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeClass Pointer to string array to return class in - * @param size Size of string - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeClass is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeClass, unsigned int *size); - -/** - * Retrieve the vGPU type name. - * - * The name is an alphanumeric string that denotes a particular vGPU, e.g. GRID M60-2Q. It will not - * exceed 64 characters in length (including the NUL terminator). See \ref - * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeName Pointer to buffer to return name - * @param size Size of buffer - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a name is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeName, unsigned int *size); - -/** - * Retrieve the device ID of a vGPU type. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param vgpuTypeId Handle to vGPU type - * @param deviceID Device ID and vendor ID of the device contained in single 32 bit value - * @param subsystemID Subsytem ID and subsytem vendor ID of the device contained in single 32 bit value - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a deviceId or \a subsystemID are NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *deviceID, unsigned long long *subsystemID); - -/** - * Retrieve the vGPU framebuffer size in bytes. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param fbSize Pointer to framebuffer size in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a fbSize is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *fbSize); - -/** - * Retrieve count of vGPU's supported display heads. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param numDisplayHeads Pointer to number of display heads - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a numDisplayHeads is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *numDisplayHeads); - -/** - * Retrieve vGPU display head's maximum supported resolution. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param displayIndex Zero-based index of display head - * @param xdim Pointer to maximum number of pixels in X dimension - * @param ydim Pointer to maximum number of pixels in Y dimension - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a xdim or \a ydim are NULL, or \a displayIndex - * is out of range. - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int *xdim, unsigned int *ydim); - -/** - * Retrieve license requirements for a vGPU type - * - * The license type and version required to run the specified vGPU type is returned as an alphanumeric string, in the form - * ",", for example "GRID-Virtual-PC,2.0". If a vGPU is runnable with* more than one type of license, - * the licenses are delimited by a semicolon, for example "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0". - * - * The total length of the returned string will not exceed 128 characters, including the NUL terminator. - * See \ref nvmlVgpuConstants::NVML_GRID_LICENSE_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. 
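
/*
 * Editor's sketch (not part of the original nvml.h): pulling the human-readable name and
 * framebuffer size for one vGPU type with the queries documented above. The name buffer is
 * sized with NVML_DEVICE_NAME_BUFFER_SIZE, as the name query's documentation suggests.
 */
#include <nvml.h>
#include <stdio.h>

static void describe_vgpu_type(nvmlVgpuTypeId_t typeId)
{
    char name[NVML_DEVICE_NAME_BUFFER_SIZE];
    unsigned int size = sizeof(name);
    if (nvmlVgpuTypeGetName(typeId, name, &size) != NVML_SUCCESS)
        return;

    unsigned long long fbSize = 0;
    (void)nvmlVgpuTypeGetFramebufferSize(typeId, &fbSize);
    printf("vGPU type %u: %s, framebuffer %llu bytes\n", (unsigned int)typeId, name, fbSize);
}
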
- * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeLicenseString Pointer to buffer to return license info - * @param size Size of \a vgpuTypeLicenseString buffer - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeLicenseString is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeLicenseString, unsigned int size); - -/** - * Retrieve the static frame rate limit value of the vGPU type - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param frameRateLimit Reference to return the frame rate limit value - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a frameRateLimit is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *frameRateLimit); - -/** - * Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param vgpuTypeId Handle to vGPU type - * @param vgpuInstanceCount Pointer to get the max number of vGPU instances - * that can be created on a deicve for given vgpuTypeId - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid or is not supported on target device, - * or \a vgpuInstanceCount is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCount); - -/** - * Retrieve the active vGPU instances on a device. - * - * An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. The - * array elememt count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances - * written to the buffer. - * - * If the supplied buffer is not large enough to accomodate the vGPU instance array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount. - * To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return - * NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU Types are supported. - * - * For Kepler &tm; or newer fully supported devices. 
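A rough Go sketch of the count-then-fetch protocol documented above for nvmlDeviceGetCreatableVgpus and nvmlDeviceGetActiveVgpus: the first call passes a zero count to learn the required element count, the second call passes a buffer of that size. The queryFn helper, the uint32 element type, and the stub in main are placeholders rather than go-nvml API; only the return codes (nvml.SUCCESS, nvml.ERROR_INSUFFICIENT_SIZE) come from the binding:

package main

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// queryFn stands in for a C-style "count in/out plus caller-supplied buffer"
// NVML call such as nvmlDeviceGetActiveVgpus.
type queryFn func(count *uint32, buf []uint32) nvml.Return

// fetchAll runs the documented two-call protocol: a first call with count 0
// reports the required size via ERROR_INSUFFICIENT_SIZE, a second call fills
// a buffer of exactly that size.
func fetchAll(query queryFn) ([]uint32, nvml.Return) {
	var count uint32
	if ret := query(&count, nil); ret == nvml.SUCCESS {
		return nil, nvml.SUCCESS // nothing is active/creatable
	} else if ret != nvml.ERROR_INSUFFICIENT_SIZE {
		return nil, ret
	}
	buf := make([]uint32, count)
	ret := query(&count, buf)
	return buf[:count], ret
}

func main() {
	// Stub query that behaves the way the header describes, so the sketch
	// runs without a GPU.
	ids := []uint32{11, 22, 33}
	stub := func(count *uint32, buf []uint32) nvml.Return {
		if int(*count) < len(ids) {
			*count = uint32(len(ids))
			return nvml.ERROR_INSUFFICIENT_SIZE
		}
		copy(buf, ids)
		*count = uint32(len(ids))
		return nvml.SUCCESS
	}
	got, ret := fetchAll(stub)
	fmt.Println(got, ret)
}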
- * - * @param device The identifier of the target device - * @param vgpuCount Pointer which passes in the array size as well as get - * back the number of types - * @param vgpuInstances Pointer to array in which to return list of vGPU instances - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuCount is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuInstance_t *vgpuInstances); - -/** - * Retrieve the VM ID associated with a vGPU instance. - * - * The VM ID is returned as a string, not exceeding 80 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * The format of the VM ID varies by platform, and is indicated by the type identifier returned in \a vmIdType. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param vmId Pointer to caller-supplied buffer to hold VM ID - * @param size Size of buffer in bytes - * @param vmIdType Pointer to hold VM ID type - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a vmId or \a vmIdType are NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char *vmId, unsigned int size, nvmlVgpuVmIdType_t *vmIdType); - -/** - * Retrieve the UUID of a vGPU instance. - * - * The UUID is a globally unique identifier associated with the vGPU, and is returned as a 5-part hexadecimal string, - * not exceeding 80 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param uuid Pointer to caller-supplied buffer to hold vGPU UUID - * @param size Size of buffer in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a uuid is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char *uuid, unsigned int size); - -/** - * Retrieve the NVIDIA driver version installed in the VM associated with a vGPU. - * - * The version is returned as an alphanumeric string in the caller-supplied buffer \a version. The length of the version - * string will not exceed 80 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE. - * - * nvmlVgpuInstanceGetVmDriverVersion() may be called at any time for a vGPU instance. 
The guest VM driver version is - * returned as "Unknown" if no NVIDIA driver is installed in the VM, or the VM has not yet booted to the point where the - * NVIDIA driver is loaded and initialized. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param version Caller-supplied buffer to return driver version string - * @param length Size of \a version buffer - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length); - -/** - * Retrieve the framebuffer usage in bytes. - * - * Framebuffer usage is the amont of vGPU framebuffer memory that is currently in use by the VM. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance The identifier of the target instance - * @param fbUsage Pointer to framebuffer usage in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a fbUsage is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long *fbUsage); - -/** - * Retrieve the current licensing state of the vGPU instance. - * - * If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param licensed Reference to return the licensing status - * - * @return - * - \ref NVML_SUCCESS if \a licensed has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a licensed is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int *licensed); - -/** - * Retrieve the vGPU type of a vGPU instance. - * - * Returns the vGPU type ID of vgpu assigned to the vGPU instance. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param vgpuTypeId Reference to return the vgpuTypeId - * - * @return - * - \ref NVML_SUCCESS if \a vgpuTypeId has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a vgpuTypeId is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t *vgpuTypeId); - -/** - * Retrieve the frame rate limit set for the vGPU instance. - * - * Returns the value of the frame rate limit set for the vGPU instance - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param vgpuInstance Identifier of the target vGPU instance - * @param frameRateLimit Reference to return the frame rate limit - * - * @return - * - \ref NVML_SUCCESS if \a frameRateLimit has been set - * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a frameRateLimit is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int *frameRateLimit); - -/** - * Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param encoderCapacity Reference to an unsigned int for the encoder capacity - * - * @return - * - \ref NVML_SUCCESS if \a encoderCapacity has been retrived - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a encoderQueryType is invalid - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int *encoderCapacity); - -/** - * Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param encoderCapacity Unsigned int for the encoder capacity value - * - * @return - * - \ref NVML_SUCCESS if \a encoderCapacity has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity); - -/** - * Retrieves current utilization for vGPUs on a physical GPU (device). - * - * For Kepler &tm; or newer fully supported devices. - * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for vGPU instances running - * on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer - * pointed at by \a utilizationSamples. One utilization sample structure is returned per vGPU instance, and includes the - * CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values - * in nvmlValue_t unions. The function sets the caller-supplied \a sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to - * indicate the returned value type. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance - * count in \a vgpuInstanceSamplesCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate - * a buffer of size vgpuInstanceSamplesCount * sizeof(nvmlVgpuInstanceUtilizationSample_t). 
Invoke the function again with - * the allocated buffer passed in \a utilizationSamples, and \a vgpuInstanceSamplesCount set to the number of entries the - * buffer is sized for. - * - * On successful return, the function updates \a vgpuInstanceSampleCount with the number of vGPU utilization sample - * structures that were actually written. This may differ from a previously read value as vGPU instances are created or - * destroyed. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. - * - * @param device The identifier for the target device - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. - * @param sampleValType Pointer to caller-supplied buffer to hold the type of returned sample values - * @param vgpuInstanceSamplesCount Pointer to caller-supplied array size, and returns number of vGPU instances - * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU utilization samples are returned - - * @return - * - \ref NVML_SUCCESS if utilization samples are successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuInstanceSamplesCount or \a sampleValType is - * NULL, or a sample count of 0 is passed with a non-NULL \a utilizationSamples - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuInstanceSamplesCount is too small to return samples for all - * vGPU instances currently executing on the device - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, - nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, - nvmlVgpuInstanceUtilizationSample_t *utilizationSamples); - -/** - * Retrieves current utilization for processes running on vGPUs on a physical GPU (device). - * - * For Maxwell &tm; or newer fully supported devices. - * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running on - * vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the - * caller-supplied buffer pointed at by \a utilizationSamples. One utilization sample structure is returned per process running - * on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which - * the samples were recorded. Individual utilization values are returned as "unsigned int" values. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance - * count in \a vgpuProcessSamplesCount. The caller should allocate a buffer of size - * vgpuProcessSamplesCount * sizeof(nvmlVgpuProcessUtilizationSample_t). 
Invoke the function again with - * the allocated buffer passed in \a utilizationSamples, and \a vgpuProcessSamplesCount set to the number of entries the - * buffer is sized for. - * - * On successful return, the function updates \a vgpuSubProcessSampleCount with the number of vGPU sub process utilization sample - * structures that were actually written. This may differ from a previously read value depending on the number of processes that are active - * in any given sample period. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. - * - * @param device The identifier for the target device - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. - * @param vgpuProcessSamplesCount Pointer to caller-supplied array size, and returns number of processes running on vGPU instances - * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned - - * @return - * - \ref NVML_SUCCESS if utilization samples are successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuProcessSamplesCount or a sample count of 0 is - * passed with a non-NULL \a utilizationSamples - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuProcessSamplesCount is too small to return samples for all - * vGPU instances currently executing on the device - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, - unsigned int *vgpuProcessSamplesCount, - nvmlVgpuProcessUtilizationSample_t *utilizationSamples); -/** - * Retrieve the GRID licensable features. - * - * Identifies whether the system supports GRID Software Licensing. If it does, return the list of licensable feature(s) - * and their current license status. - * - * @param device Identifier of the target device - * @param pGridLicensableFeatures Pointer to structure in which GRID licensable features are returned - * - * @return - * - \ref NVML_SUCCESS if licensable features are successfully retrieved - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); - -/** - * Retrieves the current encoder statistics of a vGPU Instance - * - * For Maxwell &tm; or newer fully supported devices. 
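The lastSeenTimeStamp contract described above (0 drains the driver's internal sample buffer, a timestamp returned by a previous query yields only newer samples) can be sketched in Go as follows; the sample struct, readSince type, and fake reader are placeholders and do not reflect real go-nvml signatures:

package main

import (
	"fmt"
	"time"
)

// sample mirrors the shape of an NVML utilization sample: a value plus the
// CPU timestamp (microseconds) at which it was recorded. Field names are
// illustrative, not the real nvmlVgpuInstanceUtilizationSample_t layout.
type sample struct {
	TimeStampUs uint64
	SmUtil      uint32
}

// readSince stands in for a call such as nvmlDeviceGetVgpuUtilization: it
// returns only samples newer than lastSeen.
type readSince func(lastSeen uint64) []sample

// poll shows the timestamp-chaining pattern: pass 0 first to read everything
// buffered by the driver, then pass the newest timestamp seen so that each
// query returns only fresh samples.
func poll(read readSince, rounds int) {
	var lastSeen uint64
	for i := 0; i < rounds; i++ {
		for _, s := range read(lastSeen) {
			if s.TimeStampUs > lastSeen {
				lastSeen = s.TimeStampUs
			}
			fmt.Printf("t=%dus sm=%d%%\n", s.TimeStampUs, s.SmUtil)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	// Fake reader so the sketch runs without a GPU.
	next := uint64(0)
	fake := func(lastSeen uint64) []sample {
		next += 1_000_000
		return []sample{{TimeStampUs: next, SmUtil: uint32(next/1_000_000) % 100}}
	}
	poll(fake, 3)
}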
- * - * @param vgpuInstance Identifier of the target vGPU instance - * @param sessionCount Reference to an unsigned int for count of active encoder sessions - * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions - * @param averageLatency Reference to an unsigned int for encode latency in microseconds - * - * @return - * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount , or \a averageFps or \a averageLatency is NULL - * or \a vgpuInstance is invalid. - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, - unsigned int *averageFps, unsigned int *averageLatency); - -/** - * Retrieves information about all active encoder sessions on a vGPU Instance. - * - * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The - * array elememt count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions - * written to the buffer. - * - * If the supplied buffer is not large enough to accomodate the active session array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. - * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return - * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param sessionCount Reference to caller supplied array size, and returns - * the number of sessions. - * @param sessionInfo Reference to caller supplied array in which the list - * of session information us returned. - * - * @return - * - \ref NVML_SUCCESS if \a sessionInfo is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is - returned in \a sessionCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL or \a vgpuInstance is invalid.. - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfo); - -/** - * Retrieves the current utilization and process ID - * - * For Maxwell &tm; or newer fully supported devices. - * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running. - * Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at - * by \a utilization. One utilization sample structure is returned per process running, that had some non-zero utilization - * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values - * are returned as "unsigned int" values. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilization set to NULL. 
The caller should allocate a buffer of size - * processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed - * in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for. - * - * On successful return, the function updates \a processSamplesCount with the number of process utilization sample - * structures that were actually written. This may differ from a previously read value as instances are created or - * destroyed. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. - * - * @param device The identifier of the target device - * @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned - * @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. - - * @return - * - \ref NVML_SUCCESS if \a utilization has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, - unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvml vGPU Migration - * This chapter describes NVML operations that are associated with vGPU Migration. - * @{ - */ -/***************************************************************************************************/ - -/** - * vGPU metadata structure. 
- */ -typedef struct nvmlVgpuMetadata_st -{ - unsigned int version; //!< Current version of the structure - unsigned int revision; //!< Current revision of the structure - nvmlVgpuGuestInfoState_t guestInfoState; //!< Current state of Guest-dependent fields - char guestDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in guest - char hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in host - unsigned int reserved[8]; //!< Reserved for internal use - unsigned int opaqueDataSize; //!< Size of opaque data field in bytes - char opaqueData[4]; //!< Opaque data -} nvmlVgpuMetadata_t; - -/** - * Physical GPU metadata structure - */ -typedef struct nvmlVgpuPgpuMetadata_st -{ - unsigned int version; //!< Current version of the structure - unsigned int revision; //!< Current revision of the structure - char hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Host driver version - unsigned int pgpuVirtualizationCaps; //!< Pgpu virtualizaion capabilities bitfileld - unsigned int reserved[7]; //!< Reserved for internal use - unsigned int opaqueDataSize; //!< Size of opaque data field in bytes - char opaqueData[4]; //!< Opaque data -} nvmlVgpuPgpuMetadata_t; - -/** - * vGPU VM compatibility codes - */ -typedef enum nvmlVgpuVmCompatibility_enum -{ - NVML_VGPU_VM_COMPATIBILITY_NONE = 0x0, //!< vGPU is not runnable - NVML_VGPU_VM_COMPATIBILITY_COLD = 0x1, //!< vGPU is runnable from a cold / powered-off state (ACPI S5) - NVML_VGPU_VM_COMPATIBILITY_HIBERNATE = 0x2, //!< vGPU is runnable from a hibernated state (ACPI S4) - NVML_VGPU_VM_COMPATIBILITY_SLEEP = 0x4, //!< vGPU is runnable from a sleeped state (ACPI S3) - NVML_VGPU_VM_COMPATIBILITY_LIVE = 0x8, //!< vGPU is runnable from a live/paused (ACPI S0) -} nvmlVgpuVmCompatibility_t; - -/** - * vGPU-pGPU compatibility limit codes - */ -typedef enum nvmlVgpuPgpuCompatibilityLimitCode_enum -{ - NVML_VGPU_COMPATIBILITY_LIMIT_NONE = 0x0, //!< Compatibility is not limited. - NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = 0x1, //!< Compatibility is limited by host driver version. - NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = 0x2, //!< Compatibility is limited by guest driver version. - NVML_VGPU_COMPATIBILITY_LIMIT_GPU = 0x4, //!< Compatibility is limited by GPU hardware. - NVML_VGPU_COMPATIBILITY_LIMIT_OTHER = 0x80000000, //!< Compatibility is limited by an undefined factor. -} nvmlVgpuPgpuCompatibilityLimitCode_t; - -/** - * vGPU-pGPU compatibility structure - */ -typedef struct nvmlVgpuPgpuCompatibility_st -{ - nvmlVgpuVmCompatibility_t vgpuVmCompatibility; //!< Compatibility of vGPU VM. See \ref nvmlVgpuVmCompatibility_t - nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode; //!< Limiting factor for vGPU-pGPU compatibility. See \ref nvmlVgpuPgpuCompatibilityLimitCode_t -} nvmlVgpuPgpuCompatibility_t; - -/** - * Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its associated VM - * such as the currently installed NVIDIA guest driver version, together with host driver version and an opaque data section - * containing internal state. - * - * nvmlVgpuInstanceGetMetadata() may be called at any time for a vGPU instance. Some fields in the returned structure are - * dependent on information obtained from the guest VM, which may not yet have reached a state where that information - * is available. The current state of these dependent fields is reflected in the info structure's \ref guestInfoState field. 
- * - * The VMM may choose to read and save the vGPU's VM info as persistent metadata associated with the VM, and provide - * it to GRID Virtual GPU Manager when creating a vGPU for subsequent instances of the VM. - * - * The caller passes in a buffer via \a vgpuMetadata, with the size of the buffer in \a bufferSize. If the vGPU Metadata structure - * is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed - * in \a bufferSize. - * - * @param vgpuInstance vGPU instance handle - * @param vgpuMetadata Pointer to caller-supplied buffer into which vGPU metadata is written - * @param bufferSize Size of vgpuMetadata buffer - * - * @return - * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned - * - \ref NVML_ERROR_INSUFFICIENT_SIZE vgpuMetadata buffer is too small, required size is returned in \a bufferSize - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a vgpuInstance is invalid; if \a vgpuMetadata is NULL and the value of \a bufferSize is not 0. - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuMetadata_t *vgpuMetadata, unsigned int *bufferSize); - -/** - * Returns a vGPU metadata structure for the physical GPU indicated by \a device. The structure contains information about - * the GPU and the currently installed NVIDIA host driver version that's controlling it, together with an opaque data section - * containing internal state. - * - * The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the \a pgpuMetadata - * structure is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed - * in \a bufferSize. - * - * @param device The identifier of the target device - * @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written - * @param bufferSize Pointer to size of \a pgpuMetadata buffer - * - * @return - * - \ref NVML_SUCCESS GPU metadata structure was successfully returned - * - \ref NVML_ERROR_INSUFFICIENT_SIZE pgpuMetadata buffer is too small, required size is returned in \a bufferSize - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0. - * - \ref NVML_ERROR_NOT_SUPPORTED vGPU is not supported by the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpuMetadata_t *pgpuMetadata, unsigned int *bufferSize); - -/** - * Takes a vGPU instance metadata structure read from \ref nvmlVgpuInstanceGetMetadata(), and a vGPU metadata structure for a - * physical GPU read from \ref nvmlDeviceGetVgpuMetadata(), and returns compatibility information of the vGPU instance and the - * physical GPU. - * - * The caller passes in a buffer via \a compatibilityInfo, into which a compatibility information structure is written. The - * structure defines the states in which the vGPU / VM may be booted on the physical GPU. If the vGPU / VM compatibility - * with the physical GPU is limited, a limit code indicates the factor limiting compability. - * (see \ref nvmlVgpuPgpuCompatibilityLimitCode_t for details). - * - * Note: vGPU compatibility does not take into account dynamic capacity conditions that may limit a system's ability to - * boot a given vGPU or associated VM. 
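As a rough Go illustration of how a compatibility result returned by nvmlGetVgpuCompatibility would be interpreted, the constants below simply mirror the numeric values of nvmlVgpuVmCompatibility_t and nvmlVgpuPgpuCompatibilityLimitCode_t defined earlier in this header; the Go names and the canLiveMigrate helper are hypothetical, not part of go-nvml:

package main

import "fmt"

// vmCompat mirrors nvmlVgpuVmCompatibility_t: a bitmask of the boot states in
// which the vGPU/VM is runnable on the candidate physical GPU.
type vmCompat uint32

const (
	compatNone      vmCompat = 0x0
	compatCold      vmCompat = 0x1
	compatHibernate vmCompat = 0x2
	compatSleep     vmCompat = 0x4
	compatLive      vmCompat = 0x8
)

// limitCode mirrors nvmlVgpuPgpuCompatibilityLimitCode_t.
type limitCode uint32

const (
	limitNone        limitCode = 0x0
	limitHostDriver  limitCode = 0x1
	limitGuestDriver limitCode = 0x2
	limitGPU         limitCode = 0x4
	limitOther       limitCode = 0x80000000
)

// canLiveMigrate checks whether the compatibility bitmask includes the
// live/paused (ACPI S0) state, and reports the limiting factor if not.
func canLiveMigrate(vgpuVmCompatibility vmCompat, code limitCode) (bool, string) {
	if vgpuVmCompatibility&compatLive == 0 {
		return false, fmt.Sprintf("not runnable live; limited by code %#x", code)
	}
	return true, "runnable from a live/paused state"
}

func main() {
	ok, why := canLiveMigrate(compatCold|compatLive, limitNone)
	fmt.Println(ok, why)
}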
- * - * @param vgpuMetadata Pointer to caller-supplied vGPU metadata structure - * @param pgpuMetadata Pointer to caller-supplied GPU metadata structure - * @param compatibilityInfo Pointer to caller-supplied buffer to hold compatibility info - * - * @return - * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuMetadata or \a pgpuMetadata or \a bufferSize are NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t *vgpuMetadata, nvmlVgpuPgpuMetadata_t *pgpuMetadata, nvmlVgpuPgpuCompatibility_t *compatibilityInfo); - -/** @} */ - -/** - * NVML API versioning support - */ -#if defined(__NVML_API_VERSION_INTERNAL) -#undef nvmlDeviceRemoveGpu -#undef nvmlDeviceGetNvLinkRemotePciInfo -#undef nvmlDeviceGetPciInfo -#undef nvmlDeviceGetCount -#undef nvmlDeviceGetHandleByIndex -#undef nvmlDeviceGetHandleByPciBusId -#undef nvmlInit -#endif - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.c b/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.c deleted file mode 100644 index a3d162c0e1b..00000000000 --- a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.c +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. - -#include -#include - -#include "nvml_dl.h" - -#define DLSYM(x, sym) \ -do { \ - dlerror(); \ - x = dlsym(handle, #sym); \ - if (dlerror() != NULL) { \ - return (NVML_ERROR_FUNCTION_NOT_FOUND); \ - } \ -} while (0) - -typedef nvmlReturn_t (*nvmlSym_t)(); - -static void *handle; - -nvmlReturn_t NVML_DL(nvmlInit)(void) -{ - handle = dlopen("libnvidia-ml.so.1", RTLD_LAZY | RTLD_GLOBAL); - if (handle == NULL) { - return (NVML_ERROR_LIBRARY_NOT_FOUND); - } - return (nvmlInit()); -} - -nvmlReturn_t NVML_DL(nvmlShutdown)(void) -{ - nvmlReturn_t r = nvmlShutdown(); - if (r != NVML_SUCCESS) { - return (r); - } - return (dlclose(handle) ? NVML_ERROR_UNKNOWN : NVML_SUCCESS); -} - -nvmlReturn_t NVML_DL(nvmlDeviceGetTopologyCommonAncestor)( - nvmlDevice_t dev1, nvmlDevice_t dev2, nvmlGpuTopologyLevel_t *info) -{ - nvmlSym_t sym; - - DLSYM(sym, nvmlDeviceGetTopologyCommonAncestor); - return ((*sym)(dev1, dev2, info)); -} diff --git a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.h b/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.h deleted file mode 100644 index 628f0b3a2c2..00000000000 --- a/ecs-init/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.h +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. - -#ifndef _NVML_DL_H_ -#define _NVML_DL_H_ - -#include "nvml.h" - -#define NVML_DL(x) x##_dl - -extern nvmlReturn_t NVML_DL(nvmlInit)(void); -extern nvmlReturn_t NVML_DL(nvmlShutdown)(void); -extern nvmlReturn_t NVML_DL(nvmlDeviceGetTopologyCommonAncestor)( - nvmlDevice_t, nvmlDevice_t, nvmlGpuTopologyLevel_t *); - -#endif // _NVML_DL_H_ diff --git a/ecs-init/vendor/github.com/golang/mock/mockgen/model/model.go b/ecs-init/vendor/github.com/golang/mock/mockgen/model/model.go new file mode 100644 index 00000000000..2c6a62ceb26 --- /dev/null +++ b/ecs-init/vendor/github.com/golang/mock/mockgen/model/model.go @@ -0,0 +1,495 @@ +// Copyright 2012 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains the data model necessary for generating mock implementations. +package model + +import ( + "encoding/gob" + "fmt" + "io" + "reflect" + "strings" +) + +// pkgPath is the importable path for package model +const pkgPath = "github.com/golang/mock/mockgen/model" + +// Package is a Go package. It may be a subset. +type Package struct { + Name string + PkgPath string + Interfaces []*Interface + DotImports []string +} + +// Print writes the package name and its exported interfaces. +func (pkg *Package) Print(w io.Writer) { + _, _ = fmt.Fprintf(w, "package %s\n", pkg.Name) + for _, intf := range pkg.Interfaces { + intf.Print(w) + } +} + +// Imports returns the imports needed by the Package as a set of import paths. +func (pkg *Package) Imports() map[string]bool { + im := make(map[string]bool) + for _, intf := range pkg.Interfaces { + intf.addImports(im) + } + return im +} + +// Interface is a Go interface. +type Interface struct { + Name string + Methods []*Method +} + +// Print writes the interface name and its methods. +func (intf *Interface) Print(w io.Writer) { + _, _ = fmt.Fprintf(w, "interface %s\n", intf.Name) + for _, m := range intf.Methods { + m.Print(w) + } +} + +func (intf *Interface) addImports(im map[string]bool) { + for _, m := range intf.Methods { + m.addImports(im) + } +} + +// AddMethod adds a new method, de-duplicating by method name. +func (intf *Interface) AddMethod(m *Method) { + for _, me := range intf.Methods { + if me.Name == m.Name { + return + } + } + intf.Methods = append(intf.Methods, m) +} + +// Method is a single method of an interface. +type Method struct { + Name string + In, Out []*Parameter + Variadic *Parameter // may be nil +} + +// Print writes the method name and its signature. +func (m *Method) Print(w io.Writer) { + _, _ = fmt.Fprintf(w, " - method %s\n", m.Name) + if len(m.In) > 0 { + _, _ = fmt.Fprintf(w, " in:\n") + for _, p := range m.In { + p.Print(w) + } + } + if m.Variadic != nil { + _, _ = fmt.Fprintf(w, " ...:\n") + m.Variadic.Print(w) + } + if len(m.Out) > 0 { + _, _ = fmt.Fprintf(w, " out:\n") + for _, p := range m.Out { + p.Print(w) + } + } +} + +func (m *Method) addImports(im map[string]bool) { + for _, p := range m.In { + p.Type.addImports(im) + } + if m.Variadic != nil { + m.Variadic.Type.addImports(im) + } + for _, p := range m.Out { + p.Type.addImports(im) + } +} + +// Parameter is an argument or return parameter of a method. +type Parameter struct { + Name string // may be empty + Type Type +} + +// Print writes a method parameter. +func (p *Parameter) Print(w io.Writer) { + n := p.Name + if n == "" { + n = `""` + } + _, _ = fmt.Fprintf(w, " - %v: %v\n", n, p.Type.String(nil, "")) +} + +// Type is a Go type. 
+type Type interface { + String(pm map[string]string, pkgOverride string) string + addImports(im map[string]bool) +} + +func init() { + gob.Register(&ArrayType{}) + gob.Register(&ChanType{}) + gob.Register(&FuncType{}) + gob.Register(&MapType{}) + gob.Register(&NamedType{}) + gob.Register(&PointerType{}) + + // Call gob.RegisterName to make sure it has the consistent name registered + // for both gob decoder and encoder. + // + // For a non-pointer type, gob.Register will try to get package full path by + // calling rt.PkgPath() for a name to register. If your project has vendor + // directory, it is possible that PkgPath will get a path like this: + // ../../../vendor/github.com/golang/mock/mockgen/model + gob.RegisterName(pkgPath+".PredeclaredType", PredeclaredType("")) +} + +// ArrayType is an array or slice type. +type ArrayType struct { + Len int // -1 for slices, >= 0 for arrays + Type Type +} + +func (at *ArrayType) String(pm map[string]string, pkgOverride string) string { + s := "[]" + if at.Len > -1 { + s = fmt.Sprintf("[%d]", at.Len) + } + return s + at.Type.String(pm, pkgOverride) +} + +func (at *ArrayType) addImports(im map[string]bool) { at.Type.addImports(im) } + +// ChanType is a channel type. +type ChanType struct { + Dir ChanDir // 0, 1 or 2 + Type Type +} + +func (ct *ChanType) String(pm map[string]string, pkgOverride string) string { + s := ct.Type.String(pm, pkgOverride) + if ct.Dir == RecvDir { + return "<-chan " + s + } + if ct.Dir == SendDir { + return "chan<- " + s + } + return "chan " + s +} + +func (ct *ChanType) addImports(im map[string]bool) { ct.Type.addImports(im) } + +// ChanDir is a channel direction. +type ChanDir int + +// Constants for channel directions. +const ( + RecvDir ChanDir = 1 + SendDir ChanDir = 2 +) + +// FuncType is a function type. +type FuncType struct { + In, Out []*Parameter + Variadic *Parameter // may be nil +} + +func (ft *FuncType) String(pm map[string]string, pkgOverride string) string { + args := make([]string, len(ft.In)) + for i, p := range ft.In { + args[i] = p.Type.String(pm, pkgOverride) + } + if ft.Variadic != nil { + args = append(args, "..."+ft.Variadic.Type.String(pm, pkgOverride)) + } + rets := make([]string, len(ft.Out)) + for i, p := range ft.Out { + rets[i] = p.Type.String(pm, pkgOverride) + } + retString := strings.Join(rets, ", ") + if nOut := len(ft.Out); nOut == 1 { + retString = " " + retString + } else if nOut > 1 { + retString = " (" + retString + ")" + } + return "func(" + strings.Join(args, ", ") + ")" + retString +} + +func (ft *FuncType) addImports(im map[string]bool) { + for _, p := range ft.In { + p.Type.addImports(im) + } + if ft.Variadic != nil { + ft.Variadic.Type.addImports(im) + } + for _, p := range ft.Out { + p.Type.addImports(im) + } +} + +// MapType is a map type. +type MapType struct { + Key, Value Type +} + +func (mt *MapType) String(pm map[string]string, pkgOverride string) string { + return "map[" + mt.Key.String(pm, pkgOverride) + "]" + mt.Value.String(pm, pkgOverride) +} + +func (mt *MapType) addImports(im map[string]bool) { + mt.Key.addImports(im) + mt.Value.addImports(im) +} + +// NamedType is an exported type in a package. +type NamedType struct { + Package string // may be empty + Type string +} + +func (nt *NamedType) String(pm map[string]string, pkgOverride string) string { + if pkgOverride == nt.Package { + return nt.Type + } + prefix := pm[nt.Package] + if prefix != "" { + return prefix + "." 
+ nt.Type + } + + return nt.Type +} + +func (nt *NamedType) addImports(im map[string]bool) { + if nt.Package != "" { + im[nt.Package] = true + } +} + +// PointerType is a pointer to another type. +type PointerType struct { + Type Type +} + +func (pt *PointerType) String(pm map[string]string, pkgOverride string) string { + return "*" + pt.Type.String(pm, pkgOverride) +} +func (pt *PointerType) addImports(im map[string]bool) { pt.Type.addImports(im) } + +// PredeclaredType is a predeclared type such as "int". +type PredeclaredType string + +func (pt PredeclaredType) String(map[string]string, string) string { return string(pt) } +func (pt PredeclaredType) addImports(map[string]bool) {} + +// The following code is intended to be called by the program generated by ../reflect.go. + +// InterfaceFromInterfaceType returns a pointer to an interface for the +// given reflection interface type. +func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) { + if it.Kind() != reflect.Interface { + return nil, fmt.Errorf("%v is not an interface", it) + } + intf := &Interface{} + + for i := 0; i < it.NumMethod(); i++ { + mt := it.Method(i) + // TODO: need to skip unexported methods? or just raise an error? + m := &Method{ + Name: mt.Name, + } + + var err error + m.In, m.Variadic, m.Out, err = funcArgsFromType(mt.Type) + if err != nil { + return nil, err + } + + intf.AddMethod(m) + } + + return intf, nil +} + +// t's Kind must be a reflect.Func. +func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) { + nin := t.NumIn() + if t.IsVariadic() { + nin-- + } + var p *Parameter + for i := 0; i < nin; i++ { + p, err = parameterFromType(t.In(i)) + if err != nil { + return + } + in = append(in, p) + } + if t.IsVariadic() { + p, err = parameterFromType(t.In(nin).Elem()) + if err != nil { + return + } + variadic = p + } + for i := 0; i < t.NumOut(); i++ { + p, err = parameterFromType(t.Out(i)) + if err != nil { + return + } + out = append(out, p) + } + return +} + +func parameterFromType(t reflect.Type) (*Parameter, error) { + tt, err := typeFromType(t) + if err != nil { + return nil, err + } + return &Parameter{Type: tt}, nil +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var byteType = reflect.TypeOf(byte(0)) + +func typeFromType(t reflect.Type) (Type, error) { + // Hack workaround for https://golang.org/issue/3853. + // This explicit check should not be necessary. + if t == byteType { + return PredeclaredType("byte"), nil + } + + if imp := t.PkgPath(); imp != "" { + return &NamedType{ + Package: impPath(imp), + Type: t.Name(), + }, nil + } + + // only unnamed or predeclared types after here + + // Lots of types have element types. Let's do the parsing and error checking for all of them. 
+ var elemType Type + switch t.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice: + var err error + elemType, err = typeFromType(t.Elem()) + if err != nil { + return nil, err + } + } + + switch t.Kind() { + case reflect.Array: + return &ArrayType{ + Len: t.Len(), + Type: elemType, + }, nil + case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String: + return PredeclaredType(t.Kind().String()), nil + case reflect.Chan: + var dir ChanDir + switch t.ChanDir() { + case reflect.RecvDir: + dir = RecvDir + case reflect.SendDir: + dir = SendDir + } + return &ChanType{ + Dir: dir, + Type: elemType, + }, nil + case reflect.Func: + in, variadic, out, err := funcArgsFromType(t) + if err != nil { + return nil, err + } + return &FuncType{ + In: in, + Out: out, + Variadic: variadic, + }, nil + case reflect.Interface: + // Two special interfaces. + if t.NumMethod() == 0 { + return PredeclaredType("interface{}"), nil + } + if t == errorType { + return PredeclaredType("error"), nil + } + case reflect.Map: + kt, err := typeFromType(t.Key()) + if err != nil { + return nil, err + } + return &MapType{ + Key: kt, + Value: elemType, + }, nil + case reflect.Ptr: + return &PointerType{ + Type: elemType, + }, nil + case reflect.Slice: + return &ArrayType{ + Len: -1, + Type: elemType, + }, nil + case reflect.Struct: + if t.NumField() == 0 { + return PredeclaredType("struct{}"), nil + } + } + + // TODO: Struct, UnsafePointer + return nil, fmt.Errorf("can't yet turn %v (%v) into a model.Type", t, t.Kind()) +} + +// impPath sanitizes the package path returned by `PkgPath` method of a reflect Type so that +// it is importable. PkgPath might return a path that includes "vendor". These paths do not +// compile, so we need to remove everything up to and including "/vendor/". +// See https://github.com/golang/go/issues/12019. +func impPath(imp string) string { + if strings.HasPrefix(imp, "vendor/") { + imp = "/" + imp + } + if i := strings.LastIndex(imp, "/vendor/"); i != -1 { + imp = imp[i+len("/vendor/"):] + } + return imp +} + +// ErrorInterface represent built-in error interface. +var ErrorInterface = Interface{ + Name: "error", + Methods: []*Method{ + { + Name: "Error", + Out: []*Parameter{ + { + Name: "", + Type: PredeclaredType("string"), + }, + }, + }, + }, +} diff --git a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d86..4d4b4aad6fe 100644 --- a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. - if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! 
timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. - if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e2f..00000000000 --- a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. -func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3c8..00000000000 --- a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_format.go b/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c790b..3ddab109ad9 100644 --- a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. 
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec53c..a84e09bd409 100644 --- a/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/ecs-init/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) 
} -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) 
} -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/ecs-init/vendor/github.com/stretchr/testify/assert/assertions.go b/ecs-init/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba926..0b7570f21c6 100644 --- a/ecs-init/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/ecs-init/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. 
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. + if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) + } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. 
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) } if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) 
+ } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. -func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. + lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/ecs-init/vendor/github.com/stretchr/testify/assert/http_assertions.go b/ecs-init/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a75..861ed4b7ced 100644 --- a/ecs-init/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/ecs-init/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) 
} isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/ecs-init/vendor/github.com/stretchr/testify/require/require.go b/ecs-init/vendor/github.com/stretchr/testify/require/require.go index 63f85214767..506a82f8077 100644 --- a/ecs-init/vendor/github.com/stretchr/testify/require/require.go +++ b/ecs-init/vendor/github.com/stretchr/testify/require/require.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf t.FailNow() } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplementsf(t, interfaceObject, object, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) @@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. 
// -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/ecs-init/vendor/github.com/stretchr/testify/require/require_forward.go b/ecs-init/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b09330a4..eee8310a5fa 100644 --- a/ecs-init/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/ecs-init/vendor/github.com/stretchr/testify/require/require_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) 
} -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) 
contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/ecs-init/vendor/modules.txt b/ecs-init/vendor/modules.txt index bc68b49b5ef..4eb10ddeb82 100644 --- a/ecs-init/vendor/modules.txt +++ b/ecs-init/vendor/modules.txt @@ -9,9 +9,10 @@ github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid -# github.com/NVIDIA/gpu-monitoring-tools v0.0.0-20180829222009-86f2a9fac6c5 -## explicit -github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml +# github.com/NVIDIA/go-nvml v0.12.4-0 +## explicit; go 1.20 +github.com/NVIDIA/go-nvml/pkg/dl +github.com/NVIDIA/go-nvml/pkg/nvml # github.com/aws/aws-sdk-go-v2 v1.31.0 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/aws @@ -207,6 +208,7 @@ github.com/gogo/protobuf/proto # github.com/golang/mock v1.6.0 ## explicit; go 1.11 github.com/golang/mock/gomock +github.com/golang/mock/mockgen/model # github.com/klauspost/compress v1.15.9 ## explicit; go 1.16 github.com/klauspost/compress @@ -252,8 +254,8 @@ github.com/pmezard/go-difflib/difflib # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/stretchr/testify v1.8.4 -## explicit; go 1.20 +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/require # golang.org/x/mod v0.8.0