Provided by: nvidia-cuda-dev_9.1.85-3ubuntu1_amd64
NAME
Data types used by CUDA Runtime - Data Structures struct cudaChannelFormatDesc struct cudaDeviceProp struct cudaEglFrame struct cudaEglPlaneDesc struct cudaExtent struct cudaFuncAttributes struct cudaIpcEventHandle_t struct cudaIpcMemHandle_t struct cudaLaunchParams struct cudaMemcpy3DParms struct cudaMemcpy3DPeerParms struct cudaPitchedPtr struct cudaPointerAttributes struct cudaPos struct cudaResourceDesc struct cudaResourceViewDesc struct cudaTextureDesc struct surfaceReference struct textureReference Defines #define CUDA_EGL_MAX_PLANES 3 #define CUDA_IPC_HANDLE_SIZE 64 #define cudaArrayCubemap 0x04 #define cudaArrayDefault 0x00 #define cudaArrayLayered 0x01 #define cudaArraySurfaceLoadStore 0x02 #define cudaArrayTextureGather 0x08 #define cudaCooperativeLaunchMultiDeviceNoPostSync 0x02 #define cudaCooperativeLaunchMultiDeviceNoPreSync 0x01 #define cudaCpuDeviceId ((int)-1) #define cudaDeviceBlockingSync 0x04 #define cudaDeviceLmemResizeToMax 0x10 #define cudaDeviceMapHost 0x08 #define cudaDeviceMask 0x1f #define cudaDevicePropDontCare #define cudaDeviceScheduleAuto 0x00 #define cudaDeviceScheduleBlockingSync 0x04 #define cudaDeviceScheduleMask 0x07 #define cudaDeviceScheduleSpin 0x01 #define cudaDeviceScheduleYield 0x02 #define cudaEventBlockingSync 0x01 #define cudaEventDefault 0x00 #define cudaEventDisableTiming 0x02 #define cudaEventInterprocess 0x04 #define cudaHostAllocDefault 0x00 #define cudaHostAllocMapped 0x02 #define cudaHostAllocPortable 0x01 #define cudaHostAllocWriteCombined 0x04 #define cudaHostRegisterDefault 0x00 #define cudaHostRegisterIoMemory 0x04 #define cudaHostRegisterMapped 0x02 #define cudaHostRegisterPortable 0x01 #define cudaInvalidDeviceId ((int)-2) #define cudaIpcMemLazyEnablePeerAccess 0x01 #define cudaMemAttachGlobal 0x01 #define cudaMemAttachHost 0x02 #define cudaMemAttachSingle 0x04 #define cudaOccupancyDefault 0x00 #define cudaOccupancyDisableCachingOverride 0x01 #define cudaPeerAccessDefault 0x00 #define cudaStreamDefault 0x00 #define cudaStreamLegacy ((cudaStream_t)0x1) #define cudaStreamNonBlocking 0x01 #define cudaStreamPerThread ((cudaStream_t)0x2) Typedefs typedef struct cudaArray * cudaArray_const_t typedef struct cudaArray * cudaArray_t typedef struct CUeglStreamConnection_st * cudaEglStreamConnection typedef enum cudaError cudaError_t typedef struct CUevent_st * cudaEvent_t typedef struct cudaGraphicsResource * cudaGraphicsResource_t typedef struct cudaMipmappedArray * cudaMipmappedArray_const_t typedef struct cudaMipmappedArray * cudaMipmappedArray_t typedef enum cudaOutputMode cudaOutputMode_t typedef struct CUstream_st * cudaStream_t typedef unsigned long long cudaSurfaceObject_t typedef unsigned long long cudaTextureObject_t typedef struct CUuuid_st cudaUUID_t Enumerations enum cudaCGScope { cudaCGScopeInvalid = 0, cudaCGScopeGrid = 1, cudaCGScopeMultiGrid = 2 } enum cudaChannelFormatKind { cudaChannelFormatKindSigned = 0, cudaChannelFormatKindUnsigned = 1, cudaChannelFormatKindFloat = 2, cudaChannelFormatKindNone = 3 } enum cudaComputeMode { cudaComputeModeDefault = 0, cudaComputeModeExclusive = 1, cudaComputeModeProhibited = 2, cudaComputeModeExclusiveProcess = 3 } enum cudaDeviceAttr { cudaDevAttrMaxThreadsPerBlock = 1, cudaDevAttrMaxBlockDimX = 2, cudaDevAttrMaxBlockDimY = 3, cudaDevAttrMaxBlockDimZ = 4, cudaDevAttrMaxGridDimX = 5, cudaDevAttrMaxGridDimY = 6, cudaDevAttrMaxGridDimZ = 7, cudaDevAttrMaxSharedMemoryPerBlock = 8, cudaDevAttrTotalConstantMemory = 9, cudaDevAttrWarpSize = 10, cudaDevAttrMaxPitch = 11, 
cudaDevAttrMaxRegistersPerBlock = 12, cudaDevAttrClockRate = 13, cudaDevAttrTextureAlignment = 14, cudaDevAttrGpuOverlap = 15, cudaDevAttrMultiProcessorCount = 16, cudaDevAttrKernelExecTimeout = 17, cudaDevAttrIntegrated = 18, cudaDevAttrCanMapHostMemory = 19, cudaDevAttrComputeMode = 20, cudaDevAttrMaxTexture1DWidth = 21, cudaDevAttrMaxTexture2DWidth = 22, cudaDevAttrMaxTexture2DHeight = 23, cudaDevAttrMaxTexture3DWidth = 24, cudaDevAttrMaxTexture3DHeight = 25, cudaDevAttrMaxTexture3DDepth = 26, cudaDevAttrMaxTexture2DLayeredWidth = 27, cudaDevAttrMaxTexture2DLayeredHeight = 28, cudaDevAttrMaxTexture2DLayeredLayers = 29, cudaDevAttrSurfaceAlignment = 30, cudaDevAttrConcurrentKernels = 31, cudaDevAttrEccEnabled = 32, cudaDevAttrPciBusId = 33, cudaDevAttrPciDeviceId = 34, cudaDevAttrTccDriver = 35, cudaDevAttrMemoryClockRate = 36, cudaDevAttrGlobalMemoryBusWidth = 37, cudaDevAttrL2CacheSize = 38, cudaDevAttrMaxThreadsPerMultiProcessor = 39, cudaDevAttrAsyncEngineCount = 40, cudaDevAttrUnifiedAddressing = 41, cudaDevAttrMaxTexture1DLayeredWidth = 42, cudaDevAttrMaxTexture1DLayeredLayers = 43, cudaDevAttrMaxTexture2DGatherWidth = 45, cudaDevAttrMaxTexture2DGatherHeight = 46, cudaDevAttrMaxTexture3DWidthAlt = 47, cudaDevAttrMaxTexture3DHeightAlt = 48, cudaDevAttrMaxTexture3DDepthAlt = 49, cudaDevAttrPciDomainId = 50, cudaDevAttrTexturePitchAlignment = 51, cudaDevAttrMaxTextureCubemapWidth = 52, cudaDevAttrMaxTextureCubemapLayeredWidth = 53, cudaDevAttrMaxTextureCubemapLayeredLayers = 54, cudaDevAttrMaxSurface1DWidth = 55, cudaDevAttrMaxSurface2DWidth = 56, cudaDevAttrMaxSurface2DHeight = 57, cudaDevAttrMaxSurface3DWidth = 58, cudaDevAttrMaxSurface3DHeight = 59, cudaDevAttrMaxSurface3DDepth = 60, cudaDevAttrMaxSurface1DLayeredWidth = 61, cudaDevAttrMaxSurface1DLayeredLayers = 62, cudaDevAttrMaxSurface2DLayeredWidth = 63, cudaDevAttrMaxSurface2DLayeredHeight = 64, cudaDevAttrMaxSurface2DLayeredLayers = 65, cudaDevAttrMaxSurfaceCubemapWidth = 66, cudaDevAttrMaxSurfaceCubemapLayeredWidth = 67, cudaDevAttrMaxSurfaceCubemapLayeredLayers = 68, cudaDevAttrMaxTexture1DLinearWidth = 69, cudaDevAttrMaxTexture2DLinearWidth = 70, cudaDevAttrMaxTexture2DLinearHeight = 71, cudaDevAttrMaxTexture2DLinearPitch = 72, cudaDevAttrMaxTexture2DMipmappedWidth = 73, cudaDevAttrMaxTexture2DMipmappedHeight = 74, cudaDevAttrComputeCapabilityMajor = 75, cudaDevAttrComputeCapabilityMinor = 76, cudaDevAttrMaxTexture1DMipmappedWidth = 77, cudaDevAttrStreamPrioritiesSupported = 78, cudaDevAttrGlobalL1CacheSupported = 79, cudaDevAttrLocalL1CacheSupported = 80, cudaDevAttrMaxSharedMemoryPerMultiprocessor = 81, cudaDevAttrMaxRegistersPerMultiprocessor = 82, cudaDevAttrManagedMemory = 83, cudaDevAttrIsMultiGpuBoard = 84, cudaDevAttrMultiGpuBoardGroupID = 85, cudaDevAttrHostNativeAtomicSupported = 86, cudaDevAttrSingleToDoublePrecisionPerfRatio = 87, cudaDevAttrPageableMemoryAccess = 88, cudaDevAttrConcurrentManagedAccess = 89, cudaDevAttrComputePreemptionSupported = 90, cudaDevAttrCanUseHostPointerForRegisteredMem = 91, cudaDevAttrCooperativeLaunch = 95, cudaDevAttrCooperativeMultiDeviceLaunch = 96, cudaDevAttrMaxSharedMemoryPerBlockOptin = 97 } enum cudaDeviceP2PAttr { cudaDevP2PAttrPerformanceRank = 1, cudaDevP2PAttrAccessSupported = 2, cudaDevP2PAttrNativeAtomicSupported = 3 } enum cudaEglColorFormat { cudaEglColorFormatYUV420Planar = 0, cudaEglColorFormatYUV420SemiPlanar = 1, cudaEglColorFormatYUV422Planar = 2, cudaEglColorFormatYUV422SemiPlanar = 3, cudaEglColorFormatRGB = 4, cudaEglColorFormatBGR = 5, cudaEglColorFormatARGB 
= 6, cudaEglColorFormatRGBA = 7, cudaEglColorFormatL = 8, cudaEglColorFormatR = 9, cudaEglColorFormatYUV444Planar = 10, cudaEglColorFormatYUV444SemiPlanar = 11, cudaEglColorFormatYUYV422 = 12, cudaEglColorFormatUYVY422 = 13, cudaEglColorFormatABGR = 14, cudaEglColorFormatBGRA = 15, cudaEglColorFormatA = 16, cudaEglColorFormatRG = 17, cudaEglColorFormatAYUV = 18, cudaEglColorFormatYVU444SemiPlanar = 19, cudaEglColorFormatYVU422SemiPlanar = 20, cudaEglColorFormatYVU420SemiPlanar = 21, cudaEglColorFormatY10V10U10_444SemiPlanar = 22, cudaEglColorFormatY10V10U10_420SemiPlanar = 23, cudaEglColorFormatY12V12U12_444SemiPlanar = 24, cudaEglColorFormatY12V12U12_420SemiPlanar = 25, cudaEglColorFormatVYUY_ER = 26, cudaEglColorFormatUYVY_ER = 27, cudaEglColorFormatYUYV_ER = 28, cudaEglColorFormatYVYU_ER = 29, cudaEglColorFormatYUV_ER = 30, cudaEglColorFormatYUVA_ER = 31, cudaEglColorFormatAYUV_ER = 32, cudaEglColorFormatYUV444Planar_ER = 33, cudaEglColorFormatYUV422Planar_ER = 34, cudaEglColorFormatYUV420Planar_ER = 35, cudaEglColorFormatYUV444SemiPlanar_ER = 36, cudaEglColorFormatYUV422SemiPlanar_ER = 37, cudaEglColorFormatYUV420SemiPlanar_ER = 38, cudaEglColorFormatYVU444Planar_ER = 39, cudaEglColorFormatYVU422Planar_ER = 40, cudaEglColorFormatYVU420Planar_ER = 41, cudaEglColorFormatYVU444SemiPlanar_ER = 42, cudaEglColorFormatYVU422SemiPlanar_ER = 43, cudaEglColorFormatYVU420SemiPlanar_ER = 44, cudaEglColorFormatBayerRGGB = 45, cudaEglColorFormatBayerBGGR = 46, cudaEglColorFormatBayerGRBG = 47, cudaEglColorFormatBayerGBRG = 48, cudaEglColorFormatBayer10RGGB = 49, cudaEglColorFormatBayer10BGGR = 50, cudaEglColorFormatBayer10GRBG = 51, cudaEglColorFormatBayer10GBRG = 52, cudaEglColorFormatBayer12RGGB = 53, cudaEglColorFormatBayer12BGGR = 54, cudaEglColorFormatBayer12GRBG = 55, cudaEglColorFormatBayer12GBRG = 56, cudaEglColorFormatBayer14RGGB = 57, cudaEglColorFormatBayer14BGGR = 58, cudaEglColorFormatBayer14GRBG = 59, cudaEglColorFormatBayer14GBRG = 60, cudaEglColorFormatBayer20RGGB = 61, cudaEglColorFormatBayer20BGGR = 62, cudaEglColorFormatBayer20GRBG = 63, cudaEglColorFormatBayer20GBRG = 64, cudaEglColorFormatYVU444Planar = 65, cudaEglColorFormatYVU422Planar = 66, cudaEglColorFormatYVU420Planar = 67 } enum cudaEglFrameType { cudaEglFrameTypeArray = 0, cudaEglFrameTypePitch = 1 } enum cudaEglResourceLocationFlags { cudaEglResourceLocationSysmem = 0x00, cudaEglResourceLocationVidmem = 0x01 } enum cudaError { cudaSuccess = 0, cudaErrorMissingConfiguration = 1, cudaErrorMemoryAllocation = 2, cudaErrorInitializationError = 3, cudaErrorLaunchFailure = 4, cudaErrorPriorLaunchFailure = 5, cudaErrorLaunchTimeout = 6, cudaErrorLaunchOutOfResources = 7, cudaErrorInvalidDeviceFunction = 8, cudaErrorInvalidConfiguration = 9, cudaErrorInvalidDevice = 10, cudaErrorInvalidValue = 11, cudaErrorInvalidPitchValue = 12, cudaErrorInvalidSymbol = 13, cudaErrorMapBufferObjectFailed = 14, cudaErrorUnmapBufferObjectFailed = 15, cudaErrorInvalidHostPointer = 16, cudaErrorInvalidDevicePointer = 17, cudaErrorInvalidTexture = 18, cudaErrorInvalidTextureBinding = 19, cudaErrorInvalidChannelDescriptor = 20, cudaErrorInvalidMemcpyDirection = 21, cudaErrorAddressOfConstant = 22, cudaErrorTextureFetchFailed = 23, cudaErrorTextureNotBound = 24, cudaErrorSynchronizationError = 25, cudaErrorInvalidFilterSetting = 26, cudaErrorInvalidNormSetting = 27, cudaErrorMixedDeviceExecution = 28, cudaErrorCudartUnloading = 29, cudaErrorUnknown = 30, cudaErrorNotYetImplemented = 31, cudaErrorMemoryValueTooLarge = 32, cudaErrorInvalidResourceHandle 
= 33, cudaErrorNotReady = 34, cudaErrorInsufficientDriver = 35, cudaErrorSetOnActiveProcess = 36, cudaErrorInvalidSurface = 37, cudaErrorNoDevice = 38, cudaErrorECCUncorrectable = 39, cudaErrorSharedObjectSymbolNotFound = 40, cudaErrorSharedObjectInitFailed = 41, cudaErrorUnsupportedLimit = 42, cudaErrorDuplicateVariableName = 43, cudaErrorDuplicateTextureName = 44, cudaErrorDuplicateSurfaceName = 45, cudaErrorDevicesUnavailable = 46, cudaErrorInvalidKernelImage = 47, cudaErrorNoKernelImageForDevice = 48, cudaErrorIncompatibleDriverContext = 49, cudaErrorPeerAccessAlreadyEnabled = 50, cudaErrorPeerAccessNotEnabled = 51, cudaErrorDeviceAlreadyInUse = 54, cudaErrorProfilerDisabled = 55, cudaErrorProfilerNotInitialized = 56, cudaErrorProfilerAlreadyStarted = 57, cudaErrorProfilerAlreadyStopped = 58, cudaErrorAssert = 59, cudaErrorTooManyPeers = 60, cudaErrorHostMemoryAlreadyRegistered = 61, cudaErrorHostMemoryNotRegistered = 62, cudaErrorOperatingSystem = 63, cudaErrorPeerAccessUnsupported = 64, cudaErrorLaunchMaxDepthExceeded = 65, cudaErrorLaunchFileScopedTex = 66, cudaErrorLaunchFileScopedSurf = 67, cudaErrorSyncDepthExceeded = 68, cudaErrorLaunchPendingCountExceeded = 69, cudaErrorNotPermitted = 70, cudaErrorNotSupported = 71, cudaErrorHardwareStackError = 72, cudaErrorIllegalInstruction = 73, cudaErrorMisalignedAddress = 74, cudaErrorInvalidAddressSpace = 75, cudaErrorInvalidPc = 76, cudaErrorIllegalAddress = 77, cudaErrorInvalidPtx = 78, cudaErrorInvalidGraphicsContext = 79, cudaErrorNvlinkUncorrectable = 80, cudaErrorJitCompilerNotFound = 81, cudaErrorCooperativeLaunchTooLarge = 82, cudaErrorStartupFailure = 0x7f, cudaErrorApiFailureBase = 10000 } enum cudaFuncAttribute { cudaFuncAttributeMaxDynamicSharedMemorySize = 8, cudaFuncAttributePreferredSharedMemoryCarveout = 9 } enum cudaFuncCache { cudaFuncCachePreferNone = 0, cudaFuncCachePreferShared = 1, cudaFuncCachePreferL1 = 2, cudaFuncCachePreferEqual = 3 } enum cudaGraphicsCubeFace { cudaGraphicsCubeFacePositiveX = 0x00, cudaGraphicsCubeFaceNegativeX = 0x01, cudaGraphicsCubeFacePositiveY = 0x02, cudaGraphicsCubeFaceNegativeY = 0x03, cudaGraphicsCubeFacePositiveZ = 0x04, cudaGraphicsCubeFaceNegativeZ = 0x05 } enum cudaGraphicsMapFlags { cudaGraphicsMapFlagsNone = 0, cudaGraphicsMapFlagsReadOnly = 1, cudaGraphicsMapFlagsWriteDiscard = 2 } enum cudaGraphicsRegisterFlags { cudaGraphicsRegisterFlagsNone = 0, cudaGraphicsRegisterFlagsReadOnly = 1, cudaGraphicsRegisterFlagsWriteDiscard = 2, cudaGraphicsRegisterFlagsSurfaceLoadStore = 4, cudaGraphicsRegisterFlagsTextureGather = 8 } enum cudaLimit { cudaLimitStackSize = 0x00, cudaLimitPrintfFifoSize = 0x01, cudaLimitMallocHeapSize = 0x02, cudaLimitDevRuntimeSyncDepth = 0x03, cudaLimitDevRuntimePendingLaunchCount = 0x04 } enum cudaMemcpyKind { cudaMemcpyHostToHost = 0, cudaMemcpyHostToDevice = 1, cudaMemcpyDeviceToHost = 2, cudaMemcpyDeviceToDevice = 3, cudaMemcpyDefault = 4 } enum cudaMemoryAdvise { cudaMemAdviseSetReadMostly = 1, cudaMemAdviseUnsetReadMostly = 2, cudaMemAdviseSetPreferredLocation = 3, cudaMemAdviseUnsetPreferredLocation = 4, cudaMemAdviseSetAccessedBy = 5, cudaMemAdviseUnsetAccessedBy = 6 } enum cudaMemoryType { cudaMemoryTypeHost = 1, cudaMemoryTypeDevice = 2 } enum cudaMemRangeAttribute { cudaMemRangeAttributeReadMostly = 1, cudaMemRangeAttributePreferredLocation = 2, cudaMemRangeAttributeAccessedBy = 3, cudaMemRangeAttributeLastPrefetchLocation = 4 } enum cudaOutputMode { cudaKeyValuePair = 0x00, cudaCSV = 0x01 } enum cudaResourceType { cudaResourceTypeArray = 0x00, 
cudaResourceTypeMipmappedArray = 0x01, cudaResourceTypeLinear = 0x02, cudaResourceTypePitch2D = 0x03 } enum cudaResourceViewFormat { cudaResViewFormatNone = 0x00, cudaResViewFormatUnsignedChar1 = 0x01, cudaResViewFormatUnsignedChar2 = 0x02, cudaResViewFormatUnsignedChar4 = 0x03, cudaResViewFormatSignedChar1 = 0x04, cudaResViewFormatSignedChar2 = 0x05, cudaResViewFormatSignedChar4 = 0x06, cudaResViewFormatUnsignedShort1 = 0x07, cudaResViewFormatUnsignedShort2 = 0x08, cudaResViewFormatUnsignedShort4 = 0x09, cudaResViewFormatSignedShort1 = 0x0a, cudaResViewFormatSignedShort2 = 0x0b, cudaResViewFormatSignedShort4 = 0x0c, cudaResViewFormatUnsignedInt1 = 0x0d, cudaResViewFormatUnsignedInt2 = 0x0e, cudaResViewFormatUnsignedInt4 = 0x0f, cudaResViewFormatSignedInt1 = 0x10, cudaResViewFormatSignedInt2 = 0x11, cudaResViewFormatSignedInt4 = 0x12, cudaResViewFormatHalf1 = 0x13, cudaResViewFormatHalf2 = 0x14, cudaResViewFormatHalf4 = 0x15, cudaResViewFormatFloat1 = 0x16, cudaResViewFormatFloat2 = 0x17, cudaResViewFormatFloat4 = 0x18, cudaResViewFormatUnsignedBlockCompressed1 = 0x19, cudaResViewFormatUnsignedBlockCompressed2 = 0x1a, cudaResViewFormatUnsignedBlockCompressed3 = 0x1b, cudaResViewFormatUnsignedBlockCompressed4 = 0x1c, cudaResViewFormatSignedBlockCompressed4 = 0x1d, cudaResViewFormatUnsignedBlockCompressed5 = 0x1e, cudaResViewFormatSignedBlockCompressed5 = 0x1f, cudaResViewFormatUnsignedBlockCompressed6H = 0x20, cudaResViewFormatSignedBlockCompressed6H = 0x21, cudaResViewFormatUnsignedBlockCompressed7 = 0x22 } enum cudaSharedCarveout enum cudaSharedMemConfig enum cudaSurfaceBoundaryMode { cudaBoundaryModeZero = 0, cudaBoundaryModeClamp = 1, cudaBoundaryModeTrap = 2 } enum cudaSurfaceFormatMode { cudaFormatModeForced = 0, cudaFormatModeAuto = 1 } enum cudaTextureAddressMode { cudaAddressModeWrap = 0, cudaAddressModeClamp = 1, cudaAddressModeMirror = 2, cudaAddressModeBorder = 3 } enum cudaTextureFilterMode { cudaFilterModePoint = 0, cudaFilterModeLinear = 1 } enum cudaTextureReadMode { cudaReadModeElementType = 0, cudaReadModeNormalizedFloat = 1 }
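The structures and enumerations listed above are typically used together; for example, creating a texture object combines cudaChannelFormatDesc, cudaResourceDesc, cudaTextureDesc, and several of the texture enumerations documented below. A minimal host-side sketch of that combination, assuming a CUDA-capable device is present and omitting error checking for brevity (sizes and values are arbitrary examples):

    #include <cuda_runtime.h>
    #include <string.h>
    #include <stdlib.h>

    int main(void)
    {
        const size_t width = 64, height = 64;

        /* 32-bit float channel, described with cudaChannelFormatKindFloat. */
        cudaChannelFormatDesc ch = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);

        cudaArray_t arr = NULL;
        cudaMallocArray(&arr, &ch, width, height, cudaArrayDefault);

        float *h_src = (float *)calloc(width * height, sizeof(float));
        cudaMemcpy2DToArray(arr, 0, 0, h_src, width * sizeof(float),
                            width * sizeof(float), height, cudaMemcpyHostToDevice);

        /* Describe the backing resource and the sampling behaviour. */
        struct cudaResourceDesc res;
        memset(&res, 0, sizeof(res));
        res.resType = cudaResourceTypeArray;
        res.res.array.array = arr;

        struct cudaTextureDesc tex;
        memset(&tex, 0, sizeof(tex));
        tex.addressMode[0]   = cudaAddressModeClamp;
        tex.addressMode[1]   = cudaAddressModeClamp;
        tex.filterMode       = cudaFilterModeLinear;
        tex.readMode         = cudaReadModeElementType;
        tex.normalizedCoords = 1;

        cudaTextureObject_t texObj = 0;
        cudaCreateTextureObject(&texObj, &res, &tex, NULL);

        /* ... launch kernels that sample texObj via tex2D<float>() ... */

        cudaDestroyTextureObject(texObj);
        cudaFreeArray(arr);
        free(h_src);
        return 0;
    }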
Define Documentation
#define CUDA_EGL_MAX_PLANES 3
    Maximum number of planes per frame
#define CUDA_IPC_HANDLE_SIZE 64
    CUDA IPC Handle Size
#define cudaArrayCubemap 0x04
    Must be set in cudaMalloc3DArray to create a cubemap CUDA array
#define cudaArrayDefault 0x00
    Default CUDA array allocation flag
#define cudaArrayLayered 0x01
    Must be set in cudaMalloc3DArray to create a layered CUDA array
#define cudaArraySurfaceLoadStore 0x02
    Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind surfaces to the CUDA array
#define cudaArrayTextureGather 0x08
    Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform texture gather operations on the CUDA array
#define cudaCooperativeLaunchMultiDeviceNoPostSync 0x02
    If set, any subsequent work pushed in a stream that participated in a call to cudaLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution.
#define cudaCooperativeLaunchMultiDeviceNoPreSync 0x01
    If set, each kernel launched as part of cudaLaunchCooperativeKernelMultiDevice only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution.
#define cudaCpuDeviceId ((int)-1)
    Device id that represents the CPU
#define cudaDeviceBlockingSync 0x04
    Device flag - Use blocking synchronization
    Deprecated: This flag was deprecated as of CUDA 4.0 and replaced with cudaDeviceScheduleBlockingSync.
#define cudaDeviceLmemResizeToMax 0x10
    Device flag - Keep local memory allocation after launch
#define cudaDeviceMapHost 0x08
    Device flag - Support mapped pinned allocations
#define cudaDeviceMask 0x1f
    Device flags mask
#define cudaDevicePropDontCare
    Empty device properties
#define cudaDeviceScheduleAuto 0x00
    Device flag - Automatic scheduling
#define cudaDeviceScheduleBlockingSync 0x04
    Device flag - Use blocking synchronization
#define cudaDeviceScheduleMask 0x07
    Device schedule flags mask
#define cudaDeviceScheduleSpin 0x01
    Device flag - Spin default scheduling
#define cudaDeviceScheduleYield 0x02
    Device flag - Yield default scheduling
#define cudaEventBlockingSync 0x01
    Event uses blocking synchronization
#define cudaEventDefault 0x00
    Default event flag
#define cudaEventDisableTiming 0x02
    Event will not record timing data
#define cudaEventInterprocess 0x04
    Event is suitable for interprocess use. cudaEventDisableTiming must be set
#define cudaHostAllocDefault 0x00
    Default page-locked allocation flag
#define cudaHostAllocMapped 0x02
    Map allocation into device space
#define cudaHostAllocPortable 0x01
    Pinned memory accessible by all CUDA contexts
#define cudaHostAllocWriteCombined 0x04
    Write-combined memory
#define cudaHostRegisterDefault 0x00
    Default host memory registration flag
#define cudaHostRegisterIoMemory 0x04
    Memory-mapped I/O space
#define cudaHostRegisterMapped 0x02
    Map registered memory into device space
#define cudaHostRegisterPortable 0x01
    Pinned memory accessible by all CUDA contexts
#define cudaInvalidDeviceId ((int)-2)
    Device id that represents an invalid device
#define cudaIpcMemLazyEnablePeerAccess 0x01
    Automatically enable peer access between remote devices as needed
#define cudaMemAttachGlobal 0x01
    Memory can be accessed by any stream on any device
#define cudaMemAttachHost 0x02
    Memory cannot be accessed by any stream on any device
#define cudaMemAttachSingle 0x04
    Memory can only be accessed by a single stream on the associated device
#define cudaOccupancyDefault 0x00
    Default behavior
#define cudaOccupancyDisableCachingOverride 0x01
    Assume global caching is enabled and cannot be automatically turned off
#define cudaPeerAccessDefault 0x00
    Default peer addressing enable flag
#define cudaStreamDefault 0x00
    Default stream flag
#define cudaStreamLegacy ((cudaStream_t)0x1)
    Legacy stream handle. Stream handle that can be passed as a cudaStream_t to use an implicit stream with legacy synchronization behavior. See details of the synchronization behavior.
#define cudaStreamNonBlocking 0x01
    Stream does not synchronize with stream 0 (the NULL stream)
#define cudaStreamPerThread ((cudaStream_t)0x2)
    Per-thread stream handle. Stream handle that can be passed as a cudaStream_t to use an implicit stream with per-thread synchronization behavior. See details of the synchronization behavior.
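These flag macros are typically combined with bitwise OR and passed to the corresponding creation or allocation calls. A minimal host-side sketch, assuming a single default device and omitting error checking for brevity:

    #include <cuda_runtime.h>

    int main(void)
    {
        /* Combine device flags before any work creates the primary context. */
        cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync | cudaDeviceMapHost);

        /* Page-locked host buffer, portable across contexts and mapped into device space. */
        float *h_buf = NULL;
        cudaHostAlloc((void **)&h_buf, 1024 * sizeof(float),
                      cudaHostAllocPortable | cudaHostAllocMapped);

        /* Stream that does not synchronize with the NULL stream, and an event without timing. */
        cudaStream_t stream;
        cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
        cudaEvent_t ev;
        cudaEventCreateWithFlags(&ev, cudaEventDisableTiming);

        cudaEventRecord(ev, stream);
        cudaEventSynchronize(ev);

        cudaEventDestroy(ev);
        cudaStreamDestroy(stream);
        cudaFreeHost(h_buf);
        return 0;
    }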
Typedef Documentation
typedef struct cudaArray* cudaArray_const_t
    CUDA array (as source copy argument)
typedef struct cudaArray* cudaArray_t
    CUDA array
typedef struct CUeglStreamConnection_st* cudaEglStreamConnection
    CUDA EGLStream Connection
typedef enum cudaError cudaError_t
    CUDA Error types
typedef struct CUevent_st* cudaEvent_t
    CUDA event types
typedef struct cudaGraphicsResource* cudaGraphicsResource_t
    CUDA graphics resource types
typedef struct cudaMipmappedArray* cudaMipmappedArray_const_t
    CUDA mipmapped array (as source argument)
typedef struct cudaMipmappedArray* cudaMipmappedArray_t
    CUDA mipmapped array
typedef enum cudaOutputMode cudaOutputMode_t
    CUDA output file modes
typedef struct CUstream_st* cudaStream_t
    CUDA stream
typedef unsigned long long cudaSurfaceObject_t
    An opaque value that represents a CUDA surface object
typedef unsigned long long cudaTextureObject_t
    An opaque value that represents a CUDA texture object
typedef struct CUuuid_st cudaUUID_t
    CUDA UUID types
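Most of these typedefs are opaque handles that are created, used, and destroyed through runtime API calls, with cudaError_t carrying the result of every call. A small illustrative sketch, assuming a CUDA-capable device; the CHECK macro is a hypothetical helper for this example, not part of the runtime:

    #include <cuda_runtime.h>
    #include <stdio.h>

    /* Illustrative error-checking helper built on cudaError_t (not a CUDA API). */
    #define CHECK(call)                                                  \
        do {                                                             \
            cudaError_t err = (call);                                    \
            if (err != cudaSuccess) {                                    \
                fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,       \
                        cudaGetErrorString(err));                        \
                return 1;                                                \
            }                                                            \
        } while (0)

    int main(void)
    {
        cudaStream_t stream;       /* opaque CUstream_st* handle */
        cudaEvent_t  start, stop;  /* opaque CUevent_st* handles  */

        CHECK(cudaStreamCreate(&stream));
        CHECK(cudaEventCreate(&start));
        CHECK(cudaEventCreate(&stop));

        CHECK(cudaEventRecord(start, stream));
        /* ... enqueue asynchronous work on 'stream' here ... */
        CHECK(cudaEventRecord(stop, stream));
        CHECK(cudaEventSynchronize(stop));

        float ms = 0.0f;
        CHECK(cudaEventElapsedTime(&ms, start, stop));
        printf("elapsed: %.3f ms\n", ms);

        CHECK(cudaEventDestroy(start));
        CHECK(cudaEventDestroy(stop));
        CHECK(cudaStreamDestroy(stream));
        return 0;
    }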
Enumeration Type Documentation
enum cudaCGScope CUDA cooperative group scope Enumerator: cudaCGScopeInvalid Invalid cooperative group scope cudaCGScopeGrid Scope represented by a grid_group cudaCGScopeMultiGrid Scope represented by a multi_grid_group enum cudaChannelFormatKind Channel format kind Enumerator: cudaChannelFormatKindSigned Signed channel format cudaChannelFormatKindUnsigned Unsigned channel format cudaChannelFormatKindFloat Float channel format cudaChannelFormatKindNone No channel format enum cudaComputeMode CUDA device compute modes Enumerator: cudaComputeModeDefault Default compute mode (Multiple threads can use cudaSetDevice() with this device) cudaComputeModeExclusive Compute-exclusive-thread mode (Only one thread in one process will be able to use cudaSetDevice() with this device) cudaComputeModeProhibited Compute-prohibited mode (No threads can use cudaSetDevice() with this device) cudaComputeModeExclusiveProcess Compute-exclusive-process mode (Many threads in one process will be able to use cudaSetDevice() with this device) enum cudaDeviceAttr CUDA device attributes Enumerator: cudaDevAttrMaxThreadsPerBlock Maximum number of threads per block cudaDevAttrMaxBlockDimX Maximum block dimension X cudaDevAttrMaxBlockDimY Maximum block dimension Y cudaDevAttrMaxBlockDimZ Maximum block dimension Z cudaDevAttrMaxGridDimX Maximum grid dimension X cudaDevAttrMaxGridDimY Maximum grid dimension Y cudaDevAttrMaxGridDimZ Maximum grid dimension Z cudaDevAttrMaxSharedMemoryPerBlock Maximum shared memory available per block in bytes cudaDevAttrTotalConstantMemory Memory available on device for __constant__ variables in a CUDA C kernel in bytes cudaDevAttrWarpSize Warp size in threads cudaDevAttrMaxPitch Maximum pitch in bytes allowed by memory copies cudaDevAttrMaxRegistersPerBlock Maximum number of 32-bit registers available per block cudaDevAttrClockRate Peak clock frequency in kilohertz cudaDevAttrTextureAlignment Alignment requirement for textures cudaDevAttrGpuOverlap Device can possibly copy memory and execute a kernel concurrently cudaDevAttrMultiProcessorCount Number of multiprocessors on device cudaDevAttrKernelExecTimeout Specifies whether there is a run time limit on kernels cudaDevAttrIntegrated Device is integrated with host memory cudaDevAttrCanMapHostMemory Device can map host memory into CUDA address space cudaDevAttrComputeMode Compute mode (See cudaComputeMode for details) cudaDevAttrMaxTexture1DWidth Maximum 1D texture width cudaDevAttrMaxTexture2DWidth Maximum 2D texture width cudaDevAttrMaxTexture2DHeight Maximum 2D texture height cudaDevAttrMaxTexture3DWidth Maximum 3D texture width cudaDevAttrMaxTexture3DHeight Maximum 3D texture height cudaDevAttrMaxTexture3DDepth Maximum 3D texture depth cudaDevAttrMaxTexture2DLayeredWidth Maximum 2D layered texture width cudaDevAttrMaxTexture2DLayeredHeight Maximum 2D layered texture height cudaDevAttrMaxTexture2DLayeredLayers Maximum layers in a 2D layered texture cudaDevAttrSurfaceAlignment Alignment requirement for surfaces cudaDevAttrConcurrentKernels Device can possibly execute multiple kernels concurrently cudaDevAttrEccEnabled Device has ECC support enabled cudaDevAttrPciBusId PCI bus ID of the device cudaDevAttrPciDeviceId PCI device ID of the device cudaDevAttrTccDriver Device is using TCC driver model cudaDevAttrMemoryClockRate Peak memory clock frequency in kilohertz cudaDevAttrGlobalMemoryBusWidth Global memory bus width in bits cudaDevAttrL2CacheSize Size of L2 cache in bytes cudaDevAttrMaxThreadsPerMultiProcessor Maximum resident threads per 
multiprocessor cudaDevAttrAsyncEngineCount Number of asynchronous engines cudaDevAttrUnifiedAddressing Device shares a unified address space with the host cudaDevAttrMaxTexture1DLayeredWidth Maximum 1D layered texture width cudaDevAttrMaxTexture1DLayeredLayers Maximum layers in a 1D layered texture cudaDevAttrMaxTexture2DGatherWidth Maximum 2D texture width if cudaArrayTextureGather is set cudaDevAttrMaxTexture2DGatherHeight Maximum 2D texture height if cudaArrayTextureGather is set cudaDevAttrMaxTexture3DWidthAlt Alternate maximum 3D texture width cudaDevAttrMaxTexture3DHeightAlt Alternate maximum 3D texture height cudaDevAttrMaxTexture3DDepthAlt Alternate maximum 3D texture depth cudaDevAttrPciDomainId PCI domain ID of the device cudaDevAttrTexturePitchAlignment Pitch alignment requirement for textures cudaDevAttrMaxTextureCubemapWidth Maximum cubemap texture width/height cudaDevAttrMaxTextureCubemapLayeredWidth Maximum cubemap layered texture width/height cudaDevAttrMaxTextureCubemapLayeredLayers Maximum layers in a cubemap layered texture cudaDevAttrMaxSurface1DWidth Maximum 1D surface width cudaDevAttrMaxSurface2DWidth Maximum 2D surface width cudaDevAttrMaxSurface2DHeight Maximum 2D surface height cudaDevAttrMaxSurface3DWidth Maximum 3D surface width cudaDevAttrMaxSurface3DHeight Maximum 3D surface height cudaDevAttrMaxSurface3DDepth Maximum 3D surface depth cudaDevAttrMaxSurface1DLayeredWidth Maximum 1D layered surface width cudaDevAttrMaxSurface1DLayeredLayers Maximum layers in a 1D layered surface cudaDevAttrMaxSurface2DLayeredWidth Maximum 2D layered surface width cudaDevAttrMaxSurface2DLayeredHeight Maximum 2D layered surface height cudaDevAttrMaxSurface2DLayeredLayers Maximum layers in a 2D layered surface cudaDevAttrMaxSurfaceCubemapWidth Maximum cubemap surface width cudaDevAttrMaxSurfaceCubemapLayeredWidth Maximum cubemap layered surface width cudaDevAttrMaxSurfaceCubemapLayeredLayers Maximum layers in a cubemap layered surface cudaDevAttrMaxTexture1DLinearWidth Maximum 1D linear texture width cudaDevAttrMaxTexture2DLinearWidth Maximum 2D linear texture width cudaDevAttrMaxTexture2DLinearHeight Maximum 2D linear texture height cudaDevAttrMaxTexture2DLinearPitch Maximum 2D linear texture pitch in bytes cudaDevAttrMaxTexture2DMipmappedWidth Maximum mipmapped 2D texture width cudaDevAttrMaxTexture2DMipmappedHeight Maximum mipmapped 2D texture height cudaDevAttrComputeCapabilityMajor Major compute capability version number cudaDevAttrComputeCapabilityMinor Minor compute capability version number cudaDevAttrMaxTexture1DMipmappedWidth Maximum mipmapped 1D texture width cudaDevAttrStreamPrioritiesSupported Device supports stream priorities cudaDevAttrGlobalL1CacheSupported Device supports caching globals in L1 cudaDevAttrLocalL1CacheSupported Device supports caching locals in L1 cudaDevAttrMaxSharedMemoryPerMultiprocessor Maximum shared memory available per multiprocessor in bytes cudaDevAttrMaxRegistersPerMultiprocessor Maximum number of 32-bit registers available per multiprocessor cudaDevAttrManagedMemory Device can allocate managed memory on this system cudaDevAttrIsMultiGpuBoard Device is on a multi-GPU board cudaDevAttrMultiGpuBoardGroupID Unique identifier for a group of devices on the same multi-GPU board cudaDevAttrHostNativeAtomicSupported Link between the device and the host supports native atomic operations cudaDevAttrSingleToDoublePrecisionPerfRatio Ratio of single precision performance (in floating-point operations per second) to double precision performance 
cudaDevAttrPageableMemoryAccess Device supports coherently accessing pageable memory without calling cudaHostRegister on it cudaDevAttrConcurrentManagedAccess Device can coherently access managed memory concurrently with the CPU cudaDevAttrComputePreemptionSupported Device supports Compute Preemption cudaDevAttrCanUseHostPointerForRegisteredMem Device can access host registered memory at the same virtual address as the CPU cudaDevAttrCooperativeLaunch Device supports launching cooperative kernels via cudaLaunchCooperativeKernel cudaDevAttrCooperativeMultiDeviceLaunch Device can participate in cooperative kernels launched via cudaLaunchCooperativeKernelMultiDevice cudaDevAttrMaxSharedMemoryPerBlockOptin The maximum optin shared memory per block. This value may vary by chip. See cudaFuncSetAttribute enum cudaDeviceP2PAttr CUDA device P2P attributes Enumerator: cudaDevP2PAttrPerformanceRank A relative value indicating the performance of the link between two devices cudaDevP2PAttrAccessSupported Peer access is enabled cudaDevP2PAttrNativeAtomicSupported Native atomic operation over the link supported enum cudaEglColorFormat CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. Enumerator: cudaEglColorFormatYUV420Planar Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. cudaEglColorFormatYUV420SemiPlanar Y, UV in two surfaces (UV as one surface), width, height ratio same as YUV420Planar. cudaEglColorFormatYUV422Planar Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. cudaEglColorFormatYUV422SemiPlanar Y, UV in two surfaces, width, height ratio same as YUV422Planar. cudaEglColorFormatRGB R/G/B three channels in one surface with RGB byte ordering. cudaEglColorFormatBGR R/G/B three channels in one surface with BGR byte ordering. cudaEglColorFormatARGB R/G/B/A four channels in one surface with ARGB byte ordering. cudaEglColorFormatRGBA R/G/B/A four channels in one surface with RGBA byte ordering. cudaEglColorFormatL single luminance channel in one surface. cudaEglColorFormatR single color channel in one surface. cudaEglColorFormatYUV444Planar Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. cudaEglColorFormatYUV444SemiPlanar Y, UV in two surfaces (UV as one surface), width, height ratio same as YUV444Planar. cudaEglColorFormatYUYV422 Y, U, V in one surface, interleaved as YUYV. cudaEglColorFormatUYVY422 Y, U, V in one surface, interleaved as UYVY. cudaEglColorFormatABGR R/G/B/A four channels in one surface with ABGR byte ordering. cudaEglColorFormatBGRA R/G/B/A four channels in one surface with BGRA byte ordering. cudaEglColorFormatA Alpha color format - one channel in one surface. cudaEglColorFormatRG R/G color format - two channels in one surface with RG byte ordering cudaEglColorFormatAYUV Y, U, V, A four channels in one surface, interleaved as AYUV. cudaEglColorFormatYVU444SemiPlanar Y, VU in two surfaces (VU as one surface), U/V width = Y width, U/V height = Y height. cudaEglColorFormatYVU422SemiPlanar Y, VU in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = Y height. cudaEglColorFormatYVU420SemiPlanar Y, VU in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. cudaEglColorFormatY10V10U10_444SemiPlanar Y10, V10U10 in two surfaces (VU as one surface), U/V width = Y width, U/V height = Y height. 
cudaEglColorFormatY10V10U10_420SemiPlanar Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. cudaEglColorFormatY12V12U12_444SemiPlanar Y12, V12U12 in two surfaces (VU as one surface), U/V width = Y width, U/V height = Y height. cudaEglColorFormatY12V12U12_420SemiPlanar Y12, V12U12 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. cudaEglColorFormatVYUY_ER Extended Range Y, U, V in one surface, interleaved as VYUY. cudaEglColorFormatUYVY_ER Extended Range Y, U, V in one surface, interleaved as UYVY. cudaEglColorFormatYUYV_ER Extended Range Y, U, V in one surface, interleaved as YUYV. cudaEglColorFormatYVYU_ER Extended Range Y, U, V in one surface, interleaved as YVYU. cudaEglColorFormatYUV_ER Extended Range Y, U, V three channels in one surface, interleaved as YUV. cudaEglColorFormatYUVA_ER Extended Range Y, U, V, A four channels in one surface, interleaved as YUVA. cudaEglColorFormatAYUV_ER Extended Range Y, U, V, A four channels in one surface, interleaved as AYUV. cudaEglColorFormatYUV444Planar_ER Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. cudaEglColorFormatYUV422Planar_ER Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. cudaEglColorFormatYUV420Planar_ER Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. cudaEglColorFormatYUV444SemiPlanar_ER Extended Range Y, UV in two surfaces (UV as one surface), U/V width = Y width, U/V height = Y height. cudaEglColorFormatYUV422SemiPlanar_ER Extended Range Y, UV in two surfaces (UV as one surface), U/V width = 1/2 Y width, U/V height = Y height. cudaEglColorFormatYUV420SemiPlanar_ER Extended Range Y, UV in two surfaces (UV as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. cudaEglColorFormatYVU444Planar_ER Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. cudaEglColorFormatYVU422Planar_ER Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. cudaEglColorFormatYVU420Planar_ER Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. cudaEglColorFormatYVU444SemiPlanar_ER Extended Range Y, VU in two surfaces (VU as one surface), U/V width = Y width, U/V height = Y height. cudaEglColorFormatYVU422SemiPlanar_ER Extended Range Y, VU in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = Y height. cudaEglColorFormatYVU420SemiPlanar_ER Extended Range Y, VU in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. cudaEglColorFormatBayerRGGB Bayer format - one channel in one surface with interleaved RGGB ordering. cudaEglColorFormatBayerBGGR Bayer format - one channel in one surface with interleaved BGGR ordering. cudaEglColorFormatBayerGRBG Bayer format - one channel in one surface with interleaved GRBG ordering. cudaEglColorFormatBayerGBRG Bayer format - one channel in one surface with interleaved GBRG ordering. cudaEglColorFormatBayer10RGGB Bayer10 format - one channel in one surface with interleaved RGGB ordering. 10 bits used 6 bits No-op. cudaEglColorFormatBayer10BGGR Bayer10 format - one channel in one surface with interleaved BGGR ordering. 10 bits used 6 bits No-op. cudaEglColorFormatBayer10GRBG Bayer10 format - one channel in one surface with interleaved GRBG ordering. 10 bits used 6 bits No-op. 
cudaEglColorFormatBayer10GBRG Bayer10 format - one channel in one surface with interleaved GBRG ordering. 10 bits used 6 bits No-op. cudaEglColorFormatBayer12RGGB Bayer12 format - one channel in one surface with interleaved RGGB ordering. 12 bits used 4 bits No-op. cudaEglColorFormatBayer12BGGR Bayer12 format - one channel in one surface with interleaved BGGR ordering. 12 bits used 4 bits No-op. cudaEglColorFormatBayer12GRBG Bayer12 format - one channel in one surface with interleaved GRBG ordering. 12 bits used 4 bits No-op. cudaEglColorFormatBayer12GBRG Bayer12 format - one channel in one surface with interleaved GBRG ordering. 12 bits used 4 bits No-op. cudaEglColorFormatBayer14RGGB Bayer14 format - one channel in one surface with interleaved RGGB ordering. 14 bits used 2 bits No-op. cudaEglColorFormatBayer14BGGR Bayer14 format - one channel in one surface with interleaved BGGR ordering. 14 bits used 2 bits No-op. cudaEglColorFormatBayer14GRBG Bayer14 format - one channel in one surface with interleaved GRBG ordering. 14 bits used 2 bits No-op. cudaEglColorFormatBayer14GBRG Bayer14 format - one channel in one surface with interleaved GBRG ordering. 14 bits used 2 bits No-op. cudaEglColorFormatBayer20RGGB Bayer20 format - one channel in one surface with interleaved RGGB ordering. 20 bits used 12 bits No-op. cudaEglColorFormatBayer20BGGR Bayer20 format - one channel in one surface with interleaved BGGR ordering. 20 bits used 12 bits No-op. cudaEglColorFormatBayer20GRBG Bayer20 format - one channel in one surface with interleaved GRBG ordering. 20 bits used 12 bits No-op. cudaEglColorFormatBayer20GBRG Bayer20 format - one channel in one surface with interleaved GBRG ordering. 20 bits used 12 bits No-op. cudaEglColorFormatYVU444Planar Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. cudaEglColorFormatYVU422Planar Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. cudaEglColorFormatYVU420Planar Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. enum cudaEglFrameType CUDA EglFrame type - array or pointer Enumerator: cudaEglFrameTypeArray Frame type CUDA array cudaEglFrameTypePitch Frame type CUDA pointer enum cudaEglResourceLocationFlags Resource location flags- sysmem or vidmem For CUDA context on iGPU, since video and system memory are equivalent - these flags will not have an effect on the execution. For CUDA context on dGPU, applications can use the flag cudaEglResourceLocationFlags to give a hint about the desired location. cudaEglResourceLocationSysmem - the frame data is made resident on the system memory to be accessed by CUDA. cudaEglResourceLocationVidmem - the frame data is made resident on the dedicated video memory to be accessed by CUDA. There may be an additional latency due to new allocation and data migration, if the frame is produced on a different memory. Enumerator: cudaEglResourceLocationSysmem Resource location sysmem cudaEglResourceLocationVidmem Resource location vidmem enum cudaError CUDA error types Enumerator: cudaSuccess The API call returned with no errors. In the case of query calls, this can also mean that the operation being queried is complete (see cudaEventQuery() and cudaStreamQuery()). cudaErrorMissingConfiguration The device function being invoked (usually via cudaLaunchKernel()) was not previously configured via the cudaConfigureCall() function. 
cudaErrorMemoryAllocation The API call failed because it was unable to allocate enough memory to perform the requested operation. cudaErrorInitializationError The API call failed because the CUDA driver and runtime could not be initialized. cudaErrorLaunchFailure An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. The device cannot be used until cudaThreadExit() is called. All existing device memory allocations are invalid and must be reconstructed if the program is to continue using CUDA. cudaErrorPriorLaunchFailure This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches. Deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release. cudaErrorLaunchTimeout This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property kernelExecTimeoutEnabled for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. cudaErrorLaunchOutOfResources This indicates that a launch did not occur because it did not have appropriate resources. Although this error is similar to cudaErrorInvalidConfiguration, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. cudaErrorInvalidDeviceFunction The requested device function does not exist or is not compiled for the proper device architecture. cudaErrorInvalidConfiguration This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See cudaDeviceProp for more device limitations. cudaErrorInvalidDevice This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device. cudaErrorInvalidValue This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. cudaErrorInvalidPitchValue This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch. cudaErrorInvalidSymbol This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier. cudaErrorMapBufferObjectFailed This indicates that the buffer object could not be mapped. cudaErrorUnmapBufferObjectFailed This indicates that the buffer object could not be unmapped. cudaErrorInvalidHostPointer This indicates that at least one host pointer passed to the API call is not a valid host pointer. cudaErrorInvalidDevicePointer This indicates that at least one device pointer passed to the API call is not a valid device pointer. cudaErrorInvalidTexture This indicates that the texture passed to the API call is not a valid texture. cudaErrorInvalidTextureBinding This indicates that the texture binding is not valid. This occurs if you call cudaGetTextureAlignmentOffset() with an unbound texture. cudaErrorInvalidChannelDescriptor This indicates that the channel descriptor passed to the API call is not valid. 
This occurs if the format is not one of the formats specified by cudaChannelFormatKind, or if one of the dimensions is invalid. cudaErrorInvalidMemcpyDirection This indicates that the direction of the memcpy passed to the API call is not one of the types specified by cudaMemcpyKind. cudaErrorAddressOfConstant This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release. Deprecated This error return is deprecated as of CUDA 3.1. Variables in constant memory may now have their address taken by the runtime via cudaGetSymbolAddress(). cudaErrorTextureFetchFailed This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations. Deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release. cudaErrorTextureNotBound This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations. Deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release. cudaErrorSynchronizationError This indicated that a synchronization operation had failed. This was previously used for some device emulation functions. Deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release. cudaErrorInvalidFilterSetting This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA. cudaErrorInvalidNormSetting This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA. cudaErrorMixedDeviceExecution Mixing of device and device emulation code was not allowed. Deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release. cudaErrorCudartUnloading This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shut down, at a point in time after CUDA driver has been unloaded. cudaErrorUnknown This indicates that an unknown internal error has occurred. cudaErrorNotYetImplemented This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error. Deprecated This error return is deprecated as of CUDA 4.1. cudaErrorMemoryValueTooLarge This indicated that an emulated device pointer exceeded the 32-bit address range. Deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release. cudaErrorInvalidResourceHandle This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like cudaStream_t and cudaEvent_t. cudaErrorNotReady This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than cudaSuccess (which indicates completion). Calls that may return this value include cudaEventQuery() and cudaStreamQuery(). cudaErrorInsufficientDriver This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run. 
cudaErrorSetOnActiveProcess This indicates that the user has called cudaSetValidDevices(), cudaSetDeviceFlags(), cudaD3D9SetDirect3DDevice(), cudaD3D10SetDirect3DDevice(), cudaD3D11SetDirect3DDevice(), or cudaVDPAUSetVDPAUDevice() after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing CUcontext active on the host thread. cudaErrorInvalidSurface This indicates that the surface passed to the API call is not a valid surface. cudaErrorNoDevice This indicates that no CUDA-capable devices were detected by the installed CUDA driver. cudaErrorECCUncorrectable This indicates that an uncorrectable ECC error was detected during execution. cudaErrorSharedObjectSymbolNotFound This indicates that a link to a shared object failed to resolve. cudaErrorSharedObjectInitFailed This indicates that initialization of a shared object failed. cudaErrorUnsupportedLimit This indicates that the cudaLimit passed to the API call is not supported by the active device. cudaErrorDuplicateVariableName This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name. cudaErrorDuplicateTextureName This indicates that multiple textures (across separate CUDA source files in the application) share the same string name. cudaErrorDuplicateSurfaceName This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name. cudaErrorDevicesUnavailable This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of cudaComputeModeExclusive, cudaComputeModeProhibited, or when long-running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed. cudaErrorInvalidKernelImage This indicates that the device kernel image is invalid. cudaErrorNoKernelImageForDevice This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. cudaErrorIncompatibleDriverContext This indicates that the current context is not compatible with the CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see Interactions with the CUDA Driver API for more information. cudaErrorPeerAccessAlreadyEnabled This error indicates that a call to cudaDeviceEnablePeerAccess() is trying to re-enable peer addressing from a context which has already had peer addressing enabled. cudaErrorPeerAccessNotEnabled This error indicates that cudaDeviceDisablePeerAccess() is trying to disable peer addressing which has not been enabled yet via cudaDeviceEnablePeerAccess().
cudaErrorDeviceAlreadyInUse This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread. cudaErrorProfilerDisabled This indicates that the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like the Visual Profiler. cudaErrorProfilerNotInitialized Deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to attempt to enable/disable profiling via cudaProfilerStart or cudaProfilerStop without initialization. cudaErrorProfilerAlreadyStarted Deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to call cudaProfilerStart() when profiling is already enabled. cudaErrorProfilerAlreadyStopped Deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to call cudaProfilerStop() when profiling is already disabled. cudaErrorAssert An assert triggered in device code during kernel execution. The device cannot be used again until cudaThreadExit() is called. All existing allocations are invalid and must be reconstructed if the program is to continue using CUDA. cudaErrorTooManyPeers This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to cudaDeviceEnablePeerAccess(). cudaErrorHostMemoryAlreadyRegistered This error indicates that the memory range passed to cudaHostRegister() has already been registered. cudaErrorHostMemoryNotRegistered This error indicates that the pointer passed to cudaHostUnregister() does not correspond to any currently registered memory region. cudaErrorOperatingSystem This error indicates that an OS call failed. cudaErrorPeerAccessUnsupported This error indicates that P2P access is not supported across the given devices. cudaErrorLaunchMaxDepthExceeded This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches. cudaErrorLaunchFileScopedTex This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object APIs. cudaErrorLaunchFileScopedSurf This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object APIs. cudaErrorSyncDepthExceeded This error indicates that a call to cudaDeviceSynchronize made from the device runtime failed because the call was made at grid depth greater than either the default (2 levels of grids) or the user-specified device limit cudaLimitDevRuntimeSyncDepth. To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which cudaDeviceSynchronize will be called must be specified with the cudaLimitDevRuntimeSyncDepth limit to the cudaDeviceSetLimit API before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations. cudaErrorLaunchPendingCountExceeded This error indicates that a device runtime grid launch failed because the launch would exceed the limit cudaLimitDevRuntimePendingLaunchCount.
For this launch to proceed successfully, cudaDeviceSetLimit must be called to set the cudaLimitDevRuntimePendingLaunchCount to be higher than the upper bound of outstanding launches that can be issued to the device runtime. Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations. cudaErrorNotPermitted This error indicates the attempted operation is not permitted. cudaErrorNotSupported This error indicates the attempted operation is not supported on the current system or device. cudaErrorHardwareStackError Device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. cudaErrorIllegalInstruction The device encountered an illegal instruction during kernel execution This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. cudaErrorMisalignedAddress The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. cudaErrorInvalidAddressSpace While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. cudaErrorInvalidPc The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. cudaErrorIllegalAddress The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. cudaErrorInvalidPtx A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. cudaErrorInvalidGraphicsContext This indicates an error with the OpenGL or DirectX context. cudaErrorNvlinkUncorrectable This indicates that an uncorrectable NVLink error was detected during the execution. cudaErrorJitCompilerNotFound This indicates that the PTX JIT compiler library was not found. The JIT Compiler library is used for PTX compilation. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. 
cudaErrorCooperativeLaunchTooLarge This error indicates that the number of blocks launched per grid for a kernel that was launched via either cudaLaunchCooperativeKernel or cudaLaunchCooperativeKernelMultiDevice exceeds the maximum number of blocks as allowed by cudaOccupancyMaxActiveBlocksPerMultiprocessor or cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors as specified by the device attribute cudaDevAttrMultiProcessorCount. cudaErrorStartupFailure This indicates an internal startup failure in the CUDA runtime. cudaErrorApiFailureBase Any unhandled CUDA driver error is added to this value and returned via the runtime. Production releases of CUDA should not return such errors. Deprecated This error return is deprecated as of CUDA 4.1. enum cudaFuncAttribute CUDA function attributes that can be set using cudaFuncSetAttribute Enumerator: cudaFuncAttributeMaxDynamicSharedMemorySize Maximum dynamic shared memory size cudaFuncAttributePreferredSharedMemoryCarveout Preferred shared memory-L1 cache split ratio enum cudaFuncCache CUDA function cache configurations Enumerator: cudaFuncCachePreferNone Default function cache configuration, no preference cudaFuncCachePreferShared Prefer larger shared memory and smaller L1 cache cudaFuncCachePreferL1 Prefer larger L1 cache and smaller shared memory cudaFuncCachePreferEqual Prefer equal size L1 cache and shared memory enum cudaGraphicsCubeFace CUDA graphics interop array indices for cube maps Enumerator: cudaGraphicsCubeFacePositiveX Positive X face of cubemap cudaGraphicsCubeFaceNegativeX Negative X face of cubemap cudaGraphicsCubeFacePositiveY Positive Y face of cubemap cudaGraphicsCubeFaceNegativeY Negative Y face of cubemap cudaGraphicsCubeFacePositiveZ Positive Z face of cubemap cudaGraphicsCubeFaceNegativeZ Negative Z face of cubemap enum cudaGraphicsMapFlags CUDA graphics interop map flags Enumerator: cudaGraphicsMapFlagsNone Default; Assume resource can be read/written cudaGraphicsMapFlagsReadOnly CUDA will not write to this resource cudaGraphicsMapFlagsWriteDiscard CUDA will only write to and will not read from this resource enum cudaGraphicsRegisterFlags CUDA graphics interop register flags Enumerator: cudaGraphicsRegisterFlagsNone Default cudaGraphicsRegisterFlagsReadOnly CUDA will not write to this resource cudaGraphicsRegisterFlagsWriteDiscard CUDA will only write to and will not read from this resource cudaGraphicsRegisterFlagsSurfaceLoadStore CUDA will bind this resource to a surface reference cudaGraphicsRegisterFlagsTextureGather CUDA will perform texture gather operations on this resource enum cudaLimit CUDA Limits Enumerator: cudaLimitStackSize GPU thread stack size cudaLimitPrintfFifoSize GPU printf/fprintf FIFO size cudaLimitMallocHeapSize GPU malloc heap size cudaLimitDevRuntimeSyncDepth GPU device runtime synchronize depth cudaLimitDevRuntimePendingLaunchCount GPU device runtime pending launch count enum cudaMemcpyKind CUDA memory copy types Enumerator: cudaMemcpyHostToHost Host -> Host cudaMemcpyHostToDevice Host -> Device cudaMemcpyDeviceToHost Device -> Host cudaMemcpyDeviceToDevice Device -> Device cudaMemcpyDefault Direction of the transfer is inferred from the pointer values. 
enum cudaMemoryAdvise
    CUDA Memory Advise values (see the example below)
    Enumerator:
    cudaMemAdviseSetReadMostly
        Data will mostly be read and only occasionally be written to
    cudaMemAdviseUnsetReadMostly
        Undo the effect of cudaMemAdviseSetReadMostly
    cudaMemAdviseSetPreferredLocation
        Set the preferred location for the data as the specified device
    cudaMemAdviseUnsetPreferredLocation
        Clear the preferred location for the data
    cudaMemAdviseSetAccessedBy
        Data will be accessed by the specified device, so prevent page faults as much as possible
    cudaMemAdviseUnsetAccessedBy
        Let the Unified Memory subsystem decide on the page faulting policy for the specified device

enum cudaMemoryType
    CUDA memory types
    Enumerator:
    cudaMemoryTypeHost
        Host memory
    cudaMemoryTypeDevice
        Device memory

enum cudaMemRangeAttribute
    CUDA range attributes
    Enumerator:
    cudaMemRangeAttributeReadMostly
        Whether the range will mostly be read and only occasionally be written to
    cudaMemRangeAttributePreferredLocation
        The preferred location of the range
    cudaMemRangeAttributeAccessedBy
        Memory range has cudaMemAdviseSetAccessedBy set for specified device
    cudaMemRangeAttributeLastPrefetchLocation
        The last location to which the range was prefetched
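A minimal sketch of applying these advice values to a managed allocation and querying one range attribute back; the allocation size and the use of device 0 are assumptions for illustration.

    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
        const size_t n = 1 << 20;
        float *data = nullptr;
        cudaMallocManaged(&data, n * sizeof(float));

        // Advise the Unified Memory system that the range is mostly read and
        // that its preferred home is the CPU. The device argument is ignored
        // for the read-mostly advice.
        cudaMemAdvise(data, n * sizeof(float), cudaMemAdviseSetReadMostly, 0);
        cudaMemAdvise(data, n * sizeof(float), cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId);

        // Query the read-mostly attribute back (returned as a 32-bit integer).
        int readMostly = 0;
        cudaMemRangeGetAttribute(&readMostly, sizeof(readMostly),
                                 cudaMemRangeAttributeReadMostly,
                                 data, n * sizeof(float));
        std::printf("read-mostly: %d\n", readMostly);

        cudaFree(data);
        return 0;
    }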
enum cudaOutputMode
    CUDA Profiler Output modes
    Enumerator:
    cudaKeyValuePair
        Output mode Key-Value pair format.
    cudaCSV
        Output mode Comma separated values format.

enum cudaResourceType
    CUDA resource types
    Enumerator:
    cudaResourceTypeArray
        Array resource
    cudaResourceTypeMipmappedArray
        Mipmapped array resource
    cudaResourceTypeLinear
        Linear resource
    cudaResourceTypePitch2D
        Pitch 2D resource

enum cudaResourceViewFormat
    CUDA texture resource view formats
    Enumerator:
    cudaResViewFormatNone
        No resource view format (use underlying resource format)
    cudaResViewFormatUnsignedChar1
        1 channel unsigned 8-bit integers
    cudaResViewFormatUnsignedChar2
        2 channel unsigned 8-bit integers
    cudaResViewFormatUnsignedChar4
        4 channel unsigned 8-bit integers
    cudaResViewFormatSignedChar1
        1 channel signed 8-bit integers
    cudaResViewFormatSignedChar2
        2 channel signed 8-bit integers
    cudaResViewFormatSignedChar4
        4 channel signed 8-bit integers
    cudaResViewFormatUnsignedShort1
        1 channel unsigned 16-bit integers
    cudaResViewFormatUnsignedShort2
        2 channel unsigned 16-bit integers
    cudaResViewFormatUnsignedShort4
        4 channel unsigned 16-bit integers
    cudaResViewFormatSignedShort1
        1 channel signed 16-bit integers
    cudaResViewFormatSignedShort2
        2 channel signed 16-bit integers
    cudaResViewFormatSignedShort4
        4 channel signed 16-bit integers
    cudaResViewFormatUnsignedInt1
        1 channel unsigned 32-bit integers
    cudaResViewFormatUnsignedInt2
        2 channel unsigned 32-bit integers
    cudaResViewFormatUnsignedInt4
        4 channel unsigned 32-bit integers
    cudaResViewFormatSignedInt1
        1 channel signed 32-bit integers
    cudaResViewFormatSignedInt2
        2 channel signed 32-bit integers
    cudaResViewFormatSignedInt4
        4 channel signed 32-bit integers
    cudaResViewFormatHalf1
        1 channel 16-bit floating point
    cudaResViewFormatHalf2
        2 channel 16-bit floating point
    cudaResViewFormatHalf4
        4 channel 16-bit floating point
    cudaResViewFormatFloat1
        1 channel 32-bit floating point
    cudaResViewFormatFloat2
        2 channel 32-bit floating point
    cudaResViewFormatFloat4
        4 channel 32-bit floating point
    cudaResViewFormatUnsignedBlockCompressed1
        Block compressed 1
    cudaResViewFormatUnsignedBlockCompressed2
        Block compressed 2
    cudaResViewFormatUnsignedBlockCompressed3
        Block compressed 3
    cudaResViewFormatUnsignedBlockCompressed4
        Block compressed 4 unsigned
    cudaResViewFormatSignedBlockCompressed4
        Block compressed 4 signed
    cudaResViewFormatUnsignedBlockCompressed5
        Block compressed 5 unsigned
    cudaResViewFormatSignedBlockCompressed5
        Block compressed 5 signed
    cudaResViewFormatUnsignedBlockCompressed6H
        Block compressed 6 unsigned half-float
    cudaResViewFormatSignedBlockCompressed6H
        Block compressed 6 signed half-float
    cudaResViewFormatUnsignedBlockCompressed7
        Block compressed 7

enum cudaSharedCarveout
    Shared memory carveout configurations

enum cudaSharedMemConfig
    CUDA shared memory configuration

enum cudaSurfaceBoundaryMode
    CUDA Surface boundary modes
    Enumerator:
    cudaBoundaryModeZero
        Zero boundary mode
    cudaBoundaryModeClamp
        Clamp boundary mode
    cudaBoundaryModeTrap
        Trap boundary mode

enum cudaSurfaceFormatMode
    CUDA Surface format modes
    Enumerator:
    cudaFormatModeForced
        Forced format mode
    cudaFormatModeAuto
        Auto format mode

enum cudaTextureAddressMode
    CUDA texture address modes (see the example below)
    Enumerator:
    cudaAddressModeWrap
        Wrapping address mode
    cudaAddressModeClamp
        Clamp to edge address mode
    cudaAddressModeMirror
        Mirror address mode
    cudaAddressModeBorder
        Border address mode

enum cudaTextureFilterMode
    CUDA texture filter modes
    Enumerator:
    cudaFilterModePoint
        Point filter mode
    cudaFilterModeLinear
        Linear filter mode

enum cudaTextureReadMode
    CUDA texture read modes
    Enumerator:
    cudaReadModeElementType
        Read texture as specified element type
    cudaReadModeNormalizedFloat
        Read texture as normalized float
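To show how the resource and texture enumerations above fit together, here is a minimal texture-object sketch; the array size, data values, and kernel are assumptions for illustration. It backs a texture with a 1D CUDA array, selects clamp addressing, linear filtering, and element-type reads, then samples it with normalized coordinates.

    #include <cstdio>
    #include <cuda_runtime.h>

    __global__ void sampleKernel(cudaTextureObject_t tex, float *out, int width) {
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        if (x < width) {
            float u = (x + 0.5f) / width;     // normalized coordinate
            out[x] = tex1D<float>(tex, u);    // linear filtering, clamp addressing
        }
    }

    int main() {
        const int width = 256;

        // Back the texture with a 1D CUDA array of 32-bit floats.
        cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
        cudaArray_t arr = nullptr;
        cudaMallocArray(&arr, &desc, width);

        float host[width];
        for (int i = 0; i < width; ++i) host[i] = (float)i;
        cudaMemcpyToArray(arr, 0, 0, host, sizeof(host), cudaMemcpyHostToDevice);

        // Describe the resource and how it should be sampled.
        cudaResourceDesc resDesc = {};
        resDesc.resType = cudaResourceTypeArray;
        resDesc.res.array.array = arr;

        cudaTextureDesc texDesc = {};
        texDesc.addressMode[0]   = cudaAddressModeClamp;     // clamp out-of-range coordinates
        texDesc.filterMode       = cudaFilterModeLinear;     // interpolate between texels
        texDesc.readMode         = cudaReadModeElementType;  // return the stored float values
        texDesc.normalizedCoords = 1;

        cudaTextureObject_t tex = 0;
        cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);

        float *d_out = nullptr;
        cudaMalloc(&d_out, width * sizeof(float));
        sampleKernel<<<(width + 63) / 64, 64>>>(tex, d_out, width);
        cudaDeviceSynchronize();

        cudaDestroyTextureObject(tex);
        cudaFree(d_out);
        cudaFreeArray(arr);
        return 0;
    }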
Author
Generated automatically by Doxygen from the source code.