5.30. C++ API Routines

This section describes the high-level C++ API functions of the CUDA runtime application programming interface. To use these functions, your application must be compiled with the nvcc compiler.

Functions

template < class T, int dim >__host__cudaError_t cudaBindSurfaceToArray ( const surface < T, dim > & surf, cudaArray_const_t array )
[C++ API] Binds an array to a surface
template < class T, int dim >__host__cudaError_t cudaBindSurfaceToArray ( const surface < T, dim > & surf, cudaArray_const_t array, const cudaChannelFormatDesc& desc )
[C++ API] Binds an array to a surface
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaBindTexture ( size_t* offset, const texture < T, dim, readMode > & tex, const void* devPtr, size_t size = UINT_MAX )
[C++ API] Binds a memory area to a texture
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaBindTexture ( size_t* offset, const texture < T, dim, readMode > & tex, const void* devPtr, const cudaChannelFormatDesc& desc, size_t size = UINT_MAX )
[C++ API] Binds a memory area to a texture
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaBindTexture2D ( size_t* offset, const texture < T, dim, readMode > & tex, const void* devPtr, size_t width, size_t height, size_t pitch )
[C++ API] Binds a 2D memory area to a texture
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaBindTexture2D ( size_t* offset, const texture < T, dim, readMode > & tex, const void* devPtr, const cudaChannelFormatDesc& desc, size_t width, size_t height, size_t pitch )
[C++ API] Binds a 2D memory area to a texture
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaBindTextureToArray ( const texture < T, dim, readMode > & tex, cudaArray_const_t array )
[C++ API] Binds an array to a texture
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaBindTextureToArray ( const texture < T, dim, readMode > & tex, cudaArray_const_t array, const cudaChannelFormatDesc& desc )
[C++ API] Binds an array to a texture
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaBindTextureToMipmappedArray ( const texture < T, dim, readMode > & tex, cudaMipmappedArray_const_t mipmappedArray )
[C++ API] Binds a mipmapped array to a texture
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaBindTextureToMipmappedArray ( const texture < T, dim, readMode > & tex, cudaMipmappedArray_const_t mipmappedArray, const cudaChannelFormatDesc& desc )
[C++ API] Binds a mipmapped array to a texture
template < class T >__host__cudaChannelFormatDesc cudaCreateChannelDesc ( void )
[C++ API] Returns a channel descriptor using the specified format
__host__cudaError_t cudaEventCreate ( cudaEvent_t* event, unsigned int  flags )
[C++ API] Creates an event object with the specified flags
template < class T >__host__cudaError_t cudaFuncGetAttributes ( cudaFuncAttributes* attr, T* entry )
[C++ API] Find out attributes for a given function
template < class T >__host__cudaError_t cudaFuncSetAttribute ( T* entry, cudaFuncAttribute attr, int  value )
[C++ API] Set attributes for a given function
template < class T >__host__cudaError_t cudaFuncSetCacheConfig ( T* func, cudaFuncCache cacheConfig )
[C++ API] Sets the preferred cache configuration for a device function
template < class T >__host__cudaError_t cudaGetSymbolAddress ( void** devPtr, const T& symbol )
[C++ API] Finds the address associated with a CUDA symbol
template < class T >__host__cudaError_t cudaGetSymbolSize ( size_t* size, const T& symbol )
[C++ API] Finds the size of the object associated with a CUDA symbol
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaGetTextureAlignmentOffset ( size_t* offset, const texture < T, dim, readMode > & tex )
[C++ API] Get the alignment offset of a texture
template < class T >__host__cudaError_t cudaLaunchCooperativeKernel ( const T* func, dim3 gridDim, dim3 blockDim, void** args, size_t sharedMem = 0, cudaStream_t stream = 0 )
Launches a device function.
template < class T >__host__cudaError_t cudaLaunchKernel ( const T* func, dim3 gridDim, dim3 blockDim, void** args, size_t sharedMem = 0, cudaStream_t stream = 0 )
Launches a device function.
__host__cudaError_t cudaMallocHost ( void** ptr, size_t size, unsigned int  flags )
[C++ API] Allocates page-locked memory on the host
template < class T >__host__cudaError_t cudaMallocManaged ( T** devPtr, size_t size, unsigned int  flags = cudaMemAttachGlobal )
Allocates memory that will be automatically managed by the Unified Memory system.
template < class T >__host__cudaError_t cudaMemcpyFromSymbol ( void* dst, const T& symbol, size_t count, size_t offset = 0, cudaMemcpyKind kind = cudaMemcpyDeviceToHost )
[C++ API] Copies data from the given symbol on the device
template < class T >__host__cudaError_t cudaMemcpyFromSymbolAsync ( void* dst, const T& symbol, size_t count, size_t offset = 0, cudaMemcpyKind kind = cudaMemcpyDeviceToHost, cudaStream_t stream = 0 )
[C++ API] Copies data from the given symbol on the device
template < class T >__host__cudaError_t cudaMemcpyToSymbol ( const T& symbol, const void* src, size_t count, size_t offset = 0, cudaMemcpyKind kind = cudaMemcpyHostToDevice )
[C++ API] Copies data to the given symbol on the device
template < class T >__host__cudaError_t cudaMemcpyToSymbolAsync ( const T& symbol, const void* src, size_t count, size_t offset = 0, cudaMemcpyKind kind = cudaMemcpyHostToDevice, cudaStream_t stream = 0 )
[C++ API] Copies data to the given symbol on the device
template < class T >__host__cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor ( int* numBlocks, T func, int  blockSize, size_t dynamicSMemSize )
Returns occupancy for a device function.
template < class T >__host__cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags ( int* numBlocks, T func, int  blockSize, size_t dynamicSMemSize, unsigned int  flags )
Returns occupancy for a device function with the specified flags.
template < class T >__host__cudaError_t cudaOccupancyMaxPotentialBlockSize ( int* minGridSize, int* blockSize, T func, size_t dynamicSMemSize = 0, int  blockSizeLimit = 0 )
Returns grid and block size that achieves maximum potential occupancy for a device function.
template < typename UnaryFunction, class T >__host__cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMem ( int* minGridSize, int* blockSize, T func, UnaryFunction blockSizeToDynamicSMemSize, int  blockSizeLimit = 0 )
Returns grid and block size that achieves maximum potential occupancy for a device function.
template < typename UnaryFunction, class T >__host__cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags ( int* minGridSize, int* blockSize, T func, UnaryFunction blockSizeToDynamicSMemSize, int  blockSizeLimit = 0, unsigned int  flags = 0 )
Returns grid and block size that achieves maximum potential occupancy for a device function.
template < class T >__host__cudaError_t cudaOccupancyMaxPotentialBlockSizeWithFlags ( int* minGridSize, int* blockSize, T func, size_t dynamicSMemSize = 0, int  blockSizeLimit = 0, unsigned int  flags = 0 )
Returns grid and block size that achieves maximum potential occupancy for a device function with the specified flags.
template < class T >__host__cudaError_t cudaStreamAttachMemAsync ( cudaStream_t stream, T* devPtr, size_t length = 0, unsigned int  flags = cudaMemAttachSingle )
Attach memory to a stream asynchronously.
template < class T, int dim, enum cudaTextureReadMode readMode >__host__cudaError_t cudaUnbindTexture ( const texture < T, dim, readMode > & tex )
[C++ API] Unbinds a texture

Functions

template < class T, int dim >

__host__cudaError_t cudaBindSurfaceToArray ( const surface < T, dim > & surf, cudaArray_const_t array ) [inline]
[C++ API] Binds an array to a surface
Parameters
surf
- Surface to bind
array
- Memory array on device
Description

Binds the CUDA array array to the surface reference surf. The channel descriptor is inherited from the CUDA array. Any CUDA array previously bound to surf is unbound.
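
As a minimal sketch (not taken from this reference; surfRef, clearRow, and bindAndClear are illustrative names), a file-scope surface reference can be bound to an array created with the cudaArraySurfaceLoadStore flag and written through in a kernel:

    surface<void, 2> surfRef;   // surface references must be declared at file scope

    __global__ void clearRow(int width, int y)
    {
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        if (x < width)
            surf2Dwrite(0.0f, surfRef, x * sizeof(float), y);   // x is in bytes
    }

    void bindAndClear(int width, int height)
    {
        cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
        cudaArray_t arr;
        cudaMallocArray(&arr, &desc, width, height, cudaArraySurfaceLoadStore);
        cudaBindSurfaceToArray(surfRef, arr);   // descriptor inherited from arr
        clearRow<<<(width + 255) / 256, 256>>>(width, 0);
    }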

Note:

See also:

cudaBindSurfaceToArray ( C API), cudaBindSurfaceToArray ( C++ API)

template < class T, int dim >

__host__cudaError_t cudaBindSurfaceToArray ( const surface < T, dim > & surf, cudaArray_const_t array, const cudaChannelFormatDesc& desc ) [inline]
[C++ API] Binds an array to a surface
Parameters
surf
- Surface to bind
array
- Memory array on device
desc
- Channel format
Description

Binds the CUDA array array to the surface reference surf. desc describes how the memory is interpreted when dealing with the surface. Any CUDA array previously bound to surf is unbound.

Note:

See also:

cudaBindSurfaceToArray ( C API), cudaBindSurfaceToArray ( C++ API, inherited channel descriptor)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaBindTexture ( size_t* offset, const texture < T, dim, readMode > & tex, const void* devPtr, size_t size = UINT_MAX ) [inline]
[C++ API] Binds a memory area to a texture
Parameters
offset
- Offset in bytes
tex
- Texture to bind
devPtr
- Memory area on device
size
- Size of the memory area pointed to by devPtr
Description

Binds size bytes of the memory area pointed to by devPtr to texture reference tex. The channel descriptor is inherited from the texture reference type. The offset parameter is an optional byte offset as with the low-level cudaBindTexture( size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t) function. Any memory previously bound to tex is unbound.
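
As a minimal sketch (error checks omitted; texRef and gather are illustrative names), linear device memory can be bound to a file-scope 1D texture reference and fetched with tex1Dfetch():

    texture<float, 1, cudaReadModeElementType> texRef;   // file-scope texture reference

    __global__ void gather(float* out, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            out[i] = tex1Dfetch(texRef, i);   // read through the texture path
    }

    void bindLinear(float* devPtr, int n)   // devPtr from cudaMalloc()
    {
        size_t offset = 0;
        cudaBindTexture(&offset, texRef, devPtr, n * sizeof(float));
        // offset comes back as 0 for pointers returned by cudaMalloc()
    }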

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C API), cudaBindTexture ( C++ API), cudaBindTexture2D ( C++ API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C++ API), cudaBindTextureToArray ( C++ API, inherited channel descriptor), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C++ API)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaBindTexture ( size_t* offset, const texture < T, dim, readMode > & tex, const void* devPtr, const cudaChannelFormatDesc& desc, size_t size = UINT_MAX ) [inline]
[C++ API] Binds a memory area to a texture
Parameters
offset
- Offset in bytes
tex
- Texture to bind
devPtr
- Memory area on device
desc
- Channel format
size
- Size of the memory area pointed to by devPtr
Description

Binds size bytes of the memory area pointed to by devPtr to texture reference tex. desc describes how the memory is interpreted when fetching values from the texture. The offset parameter is an optional byte offset as with the low-level cudaBindTexture() function. Any memory previously bound to tex is unbound.

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C++ API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C++ API), cudaBindTextureToArray ( C++ API, inherited channel descriptor), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C++ API)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaBindTexture2D ( size_t* offset, const texture < T, dim, readMode > & tex, const void* devPtr, size_t width, size_t height, size_t pitch ) [inline]
[C++ API] Binds a 2D memory area to a texture
Parameters
offset
- Offset in bytes
tex
- Texture reference to bind
devPtr
- 2D memory area on device
width
- Width in texel units
height
- Height in texel units
pitch
- Pitch in bytes
Description

Binds the 2D memory area pointed to by devPtr to the texture reference tex. The size of the area is constrained by width in texel units, height in texel units, and pitch in byte units. The channel descriptor is inherited from the texture reference type. Any memory previously bound to tex is unbound.

Since the hardware enforces an alignment requirement on texture base addresses, cudaBindTexture2D() returns in *offset a byte offset that must be applied to texture fetches in order to read from the desired memory. This offset must be divided by the texel size and passed to kernels that read from the texture so it can be applied to the tex2D() function. If the device memory pointer was returned from cudaMalloc(), the offset is guaranteed to be 0 and NULL may be passed as the offset parameter.
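
A minimal host-side sketch, assuming the pointer comes from cudaMallocPitch() (tex2Ref and bind2D are illustrative names):

    texture<float, 2, cudaReadModeElementType> tex2Ref;   // file scope

    void bind2D(int width, int height)
    {
        float* devPtr;
        size_t pitch;
        cudaMallocPitch((void**)&devPtr, &pitch, width * sizeof(float), height);

        size_t offset = 0;
        cudaBindTexture2D(&offset, tex2Ref, devPtr, width, height, pitch);
        // an allocation from cudaMallocPitch satisfies the alignment rules,
        // so offset comes back as 0 here and NULL could be passed instead
    }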

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C++ API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C API), cudaBindTexture2D ( C++ API), cudaBindTextureToArray ( C++ API), cudaBindTextureToArray ( C++ API, inherited channel descriptor), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C++ API)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaBindTexture2D ( size_t* offset, const texture < T, dim, readMode > & tex, const void* devPtr, const cudaChannelFormatDesc& desc, size_t width, size_t height, size_t pitch ) [inline]
[C++ API] Binds a 2D memory area to a texture
Parameters
offset
- Offset in bytes
tex
- Texture reference to bind
devPtr
- 2D memory area on device
desc
- Channel format
width
- Width in texel units
height
- Height in texel units
pitch
- Pitch in bytes
Description

Binds the 2D memory area pointed to by devPtr to the texture reference tex. The size of the area is constrained by width in texel units, height in texel units, and pitch in byte units. desc describes how the memory is interpreted when fetching values from the texture. Any memory previously bound to tex is unbound.

Since the hardware enforces an alignment requirement on texture base addresses, cudaBindTexture2D() returns in *offset a byte offset that must be applied to texture fetches in order to read from the desired memory. This offset must be divided by the texel size and passed to kernels that read from the texture so it can be applied to the tex2D() function. If the device memory pointer was returned from cudaMalloc(), the offset is guaranteed to be 0 and NULL may be passed as the offset parameter.

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C++ API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C++ API), cudaBindTextureToArray ( C++ API, inherited channel descriptor), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C++ API)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaBindTextureToArray ( const texture < T, dim, readMode > & tex, cudaArray_const_t array ) [inline]
[C++ API] Binds an array to a texture
Parameters
tex
- Texture to bind
array
- Memory array on device
Description

Binds the CUDA array array to the texture reference tex. The channel descriptor is inherited from the CUDA array. Any CUDA array previously bound to tex is unbound.
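
A short sketch (imgTex and sample are illustrative names): a texture reference bound to a CUDA array can be sampled with tex2D(), with the channel descriptor taken from the array itself:

    texture<uchar4, 2, cudaReadModeNormalizedFloat> imgTex;   // file scope

    __global__ void sample(float4* out, int width, int height)
    {
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        int y = blockIdx.y * blockDim.y + threadIdx.y;
        if (x < width && y < height)
            out[y * width + x] = tex2D(imgTex, x + 0.5f, y + 0.5f);
    }

    void bindArray(cudaArray_const_t arr)
    {
        cudaBindTextureToArray(imgTex, arr);   // descriptor inherited from arr
    }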

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C++ API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C++ API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C API), cudaBindTextureToArray ( C++ API), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C++ API)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaBindTextureToArray ( const texture < T, dim, readMode > & tex, cudaArray_const_t array, const cudaChannelFormatDesc& desc ) [inline]
[C++ API] Binds an array to a texture
Parameters
tex
- Texture to bind
array
- Memory array on device
desc
- Channel format
Description

Binds the CUDA array array to the texture reference tex. desc describes how the memory is interpreted when fetching values from the texture. Any CUDA array previously bound to tex is unbound.

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C++ API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C++ API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C API), cudaBindTextureToArray ( C++ API, inherited channel descriptor), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C++ API)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaBindTextureToMipmappedArray ( const texture < T, dim, readMode > & tex, cudaMipmappedArray_const_t mipmappedArray ) [inline]
[C++ API] Binds a mipmapped array to a texture
Parameters
tex
- Texture to bind
mipmappedArray
- Memory mipmapped array on device
Description

Binds the CUDA mipmapped array mipmappedArray to the texture reference tex. The channel descriptor is inherited from the CUDA array. Any CUDA mipmapped array previously bound to tex is unbound.

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C++ API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C++ API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C API), cudaBindTextureToArray ( C++ API), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C++ API)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaBindTextureToMipmappedArray ( const texture < T, dim, readMode > & tex, cudaMipmappedArray_const_t mipmappedArray, const cudaChannelFormatDesc& desc ) [inline]
[C++ API] Binds a mipmapped array to a texture
Parameters
tex
- Texture to bind
mipmappedArray
- Memory mipmapped array on device
desc
- Channel format
Description

Binds the CUDA mipmapped array mipmappedArray to the texture reference tex. desc describes how the memory is interpreted when fetching values from the texture. Any CUDA mipmapped array previously bound to tex is unbound.

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C++ API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C++ API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C API), cudaBindTextureToArray ( C++ API, inherited channel descriptor), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C++ API)

template < class T >

__host__cudaChannelFormatDesc cudaCreateChannelDesc ( void ) [inline]
[C++ API] Returns a channel descriptor using the specified format
Returns

Channel descriptor with format matching type T

__host__cudaError_t cudaEventCreate ( cudaEvent_t* event, unsigned int  flags )
[C++ API] Creates an event object with the specified flags
Parameters
event
- Newly created event
flags
- Flags for new event
Description

Creates an event object with the specified flags. Valid flags include:

  • cudaEventDefault: Default event creation flag.

  • cudaEventBlockingSync: Specifies that the event should use blocking synchronization. A host thread that calls cudaEventSynchronize() on an event created with this flag will block until the event actually completes.

  • cudaEventDisableTiming: Specifies that the created event does not need to record timing data. Events created with this flag specified, and cudaEventBlockingSync not specified, provide the best performance when used with cudaStreamWaitEvent() and cudaEventQuery().
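
As a minimal host-side timing sketch using this overload (error checks omitted):

    void timeDefaultStream()
    {
        cudaEvent_t start, stop;
        cudaEventCreate(&start, cudaEventDefault);
        cudaEventCreate(&stop,  cudaEventDefault);

        cudaEventRecord(start, 0);
        /* ... kernel launches on the default stream ... */
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);

        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);

        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }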

Note:

See also:

cudaEventCreate ( C API), cudaEventCreateWithFlags, cudaEventRecord, cudaEventQuery, cudaEventSynchronize, cudaEventDestroy, cudaEventElapsedTime, cudaStreamWaitEvent

template < class T >

__host__cudaError_t cudaFuncGetAttributes ( cudaFuncAttributes* attr, T* entry ) [inline]
[C++ API] Find out attributes for a given function
Parameters
attr
- Return pointer to function's attributes
entry
- Function to get attributes of
Description

This function obtains the attributes of a function specified via entry. The parameter entry must be a pointer to a function that executes on the device. The parameter specified by entry must be declared as a __global__ function. The fetched attributes are placed in attr. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned.

Note that some function attributes such as maxThreadsPerBlock may vary based on the device that is currently being used.
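
A minimal sketch (myKernel is an illustrative __global__ function) that queries and prints a few attributes:

    #include <cstdio>

    __global__ void myKernel(float* data) { /* ... */ }

    void printAttributes()
    {
        cudaFuncAttributes attr;
        if (cudaFuncGetAttributes(&attr, myKernel) == cudaSuccess) {
            printf("maxThreadsPerBlock: %d\n", attr.maxThreadsPerBlock);
            printf("numRegs:            %d\n", attr.numRegs);
            printf("sharedSizeBytes:    %zu\n", attr.sharedSizeBytes);
        }
    }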

Note:

See also:

cudaLaunchKernel ( C++ API), cudaFuncSetCacheConfig ( C++ API), cudaFuncGetAttributes ( C API), cudaSetDoubleForDevice, cudaSetDoubleForHost

template < class T >

__host__cudaError_t cudaFuncSetAttribute ( T* entry, cudaFuncAttribute attr, int  value ) [inline]
[C++ API] Set attributes for a given function
Parameters
entry
- Function to set attributes for
attr
- Attribute to set
value
- Value to set
Description

This function sets the attributes of a function specified via entry. The parameter entry must be a pointer to a function that executes on the device. The parameter specified by entry must be declared as a __global__ function. The enumeration defined by attr is set to the value defined by value. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned. If the specified attribute cannot be written, or if the value is incorrect, then cudaErrorInvalidValue is returned.

Valid values for attr are:

  • cudaFuncAttributeMaxDynamicSharedMemorySize: The requested maximum size, in bytes, of dynamically-allocated shared memory available to the function.

  • cudaFuncAttributePreferredSharedMemoryCarveout: The requested shared memory carveout, as a percentage of the total shared memory.
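
A minimal sketch (bigSmemKernel is illustrative), assuming a device that supports opting into more than the default amount of dynamic shared memory:

    __global__ void bigSmemKernel(float* data)
    {
        extern __shared__ float smem[];   // dynamic shared memory
        /* ... */
    }

    void optIntoLargeSmem()
    {
        // Assumption: the device allows a 64 KB per-block opt-in.
        cudaFuncSetAttribute(bigSmemKernel,
                             cudaFuncAttributeMaxDynamicSharedMemorySize,
                             64 * 1024);
    }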

Note:

See also:

cudaLaunchKernel ( C++ API), cudaFuncSetCacheConfig ( C++ API), cudaFuncGetAttributes ( C API), cudaSetDoubleForDevice, cudaSetDoubleForHost

template < class T >

__host__cudaError_t cudaFuncSetCacheConfig ( T* func, cudaFuncCache cacheConfig ) [inline]
[C++ API] Sets the preferred cache configuration for a device function
Parameters
func
- device function pointer
cacheConfig
- Requested cache configuration
Description

On devices where the L1 cache and shared memory use the same hardware resources, this sets through cacheConfig the preferred cache configuration for the function specified via func. This is only a preference. The runtime will use the requested configuration if possible, but it is free to choose a different configuration if required to execute func.

func must be a pointer to a function that executes on the device. The parameter specified by func must be declared as a __global__ function. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned.

This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

The supported cache configurations are:

  • cudaFuncCachePreferNone: no preference for shared memory or L1 (default)

  • cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache

  • cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory

  • cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory
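
A minimal sketch (pointwise is an illustrative kernel that uses no shared memory):

    __global__ void pointwise(float* data, int n) { /* ... */ }

    void preferL1()
    {
        // No shared memory use, so ask for a larger L1 cache.
        cudaFuncSetCacheConfig(pointwise, cudaFuncCachePreferL1);
    }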

Note:

See also:

cudaLaunchKernel ( C++ API), cudaFuncSetCacheConfig ( C API), cudaFuncGetAttributes ( C++ API), cudaSetDoubleForDevice, cudaSetDoubleForHost, cudaThreadGetCacheConfig, cudaThreadSetCacheConfig

template < class T >

__host__cudaError_t cudaGetSymbolAddress ( void** devPtr, const T& symbol ) [inline]
[C++ API] Finds the address associated with a CUDA symbol
Parameters
devPtr
- Return device pointer associated with symbol
symbol
- Device symbol reference
Description

Returns in *devPtr the address of symbol symbol on the device. symbol can either be a variable that resides in global or constant memory space. If symbol cannot be found, or if symbol is not declared in the global or constant memory space, *devPtr is unchanged and the error cudaErrorInvalidSymbol is returned.
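
A minimal sketch (deviceBuffer is an illustrative __device__ variable) that converts a symbol into a raw device pointer usable with other runtime calls:

    __device__ float deviceBuffer[256];

    void zeroSymbol()
    {
        void* devPtr = nullptr;
        if (cudaGetSymbolAddress(&devPtr, deviceBuffer) == cudaSuccess)
            cudaMemset(devPtr, 0, sizeof(deviceBuffer));
    }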

Note:

See also:

cudaGetSymbolAddress ( C API), cudaGetSymbolSize ( C++ API)

template < class T >

__host__cudaError_t cudaGetSymbolSize ( size_t* size, const T& symbol ) [inline]
[C++ API] Finds the size of the object associated with a CUDA symbol
Parameters
size
- Size of object associated with symbol
symbol
- Device symbol reference
Description

Returns in *size the size of symbol symbol. symbol must be a variable that resides in global or constant memory space. If symbol cannot be found, or if symbol is not declared in global or constant memory space, *size is unchanged and the error cudaErrorInvalidSymbol is returned.

Note:

See also:

cudaGetSymbolAddress ( C++ API), cudaGetSymbolSize ( C API)

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaGetTextureAlignmentOffset ( size_t* offset, const texture < T, dim, readMode > & tex ) [inline]
[C++ API] Get the alignment offset of a texture
Parameters
offset
- Offset of texture reference in bytes
tex
- Texture to get offset of
Description

Returns in *offset the offset that was returned when texture reference tex was bound.

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C++ API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C++ API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C++ API), cudaBindTextureToArray ( C++ API, inherited channel descriptor), cudaUnbindTexture ( C++ API), cudaGetTextureAlignmentOffset ( C API)

template < class T >

__host__cudaError_t cudaLaunchCooperativeKernel ( const T* func, dim3 gridDim, dim3 blockDim, void** args, size_t sharedMem = 0, cudaStream_t stream = 0 ) [inline]
Launches a device function.
Parameters
func
- Device function symbol
gridDim
- Grid dimensions
blockDim
- Block dimensions
args
- Arguments
sharedMem
- Shared memory (defaults to 0)
stream
- Stream identifier (defaults to NULL)
Description

The function invokes the kernel func on a gridDim (gridDim.x × gridDim.y × gridDim.z) grid of blocks. Each block contains blockDim (blockDim.x × blockDim.y × blockDim.z) threads.

The device on which this kernel is invoked must have a non-zero value for the device attribute cudaDevAttrCooperativeLaunch.

The total number of blocks launched cannot exceed the maximum number of blocks per multiprocessor as returned by cudaOccupancyMaxActiveBlocksPerMultiprocessor (or cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors as specified by the device attribute cudaDevAttrMultiProcessorCount.

The kernel cannot make use of CUDA dynamic parallelism.

If the kernel has N parameters, args should point to an array of N pointers. Each pointer, from args[0] to args[N - 1], points to the region of memory from which the actual parameter will be copied.

sharedMem sets the amount of dynamic shared memory that will be available to each thread block.

stream specifies the stream with which the invocation is associated.
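
A minimal sketch (coopKernel is illustrative), assuming the device reports cudaDevAttrCooperativeLaunch and that 32 blocks of 256 threads can be resident simultaneously:

    #include <cooperative_groups.h>
    namespace cg = cooperative_groups;

    __global__ void coopKernel(int* data)
    {
        cg::grid_group grid = cg::this_grid();
        /* ... phase 1 ... */
        grid.sync();   // grid-wide barrier, valid only in cooperative launches
        /* ... phase 2 ... */
    }

    void launchCooperatively(int* d_data)
    {
        void* args[] = { &d_data };   // one address per kernel parameter
        cudaLaunchCooperativeKernel(coopKernel, dim3(32), dim3(256), args);
    }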

Note:

See also:

cudaLaunchCooperativeKernel ( C API)

template < class T >

__host__cudaError_t cudaLaunchKernel ( const T* func, dim3 gridDim, dim3 blockDim, void** args, size_t sharedMem = 0, cudaStream_t stream = 0 ) [inline]
Launches a device function.
Parameters
func
- Device function symbol
gridDim
- Grid dimensions
blockDim
- Block dimensions
args
- Arguments
sharedMem
- Shared memory (defaults to 0)
stream
- Stream identifier (defaults to NULL)
Description

The function invokes the kernel func on a gridDim (gridDim.x × gridDim.y × gridDim.z) grid of blocks. Each block contains blockDim (blockDim.x × blockDim.y × blockDim.z) threads.

If the kernel has N parameters, args should point to an array of N pointers. Each pointer, from args[0] to args[N - 1], points to the region of memory from which the actual parameter will be copied.

sharedMem sets the amount of dynamic shared memory that will be available to each thread block.

stream specifies the stream with which the invocation is associated.
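
A minimal sketch (axpy is an illustrative kernel) showing the typed launch path as an alternative to the <<<...>>> syntax:

    __global__ void axpy(float a, float* x, float* y, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) y[i] = a * x[i] + y[i];
    }

    void launchAxpy(float a, float* d_x, float* d_y, int n)
    {
        int threads = 256;
        int blocks  = (n + threads - 1) / threads;
        void* args[] = { &a, &d_x, &d_y, &n };   // addresses of the four parameters
        cudaLaunchKernel(axpy, dim3(blocks), dim3(threads), args);
    }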

Note:

See also:

cudaLaunchKernel ( C API)

__host__cudaError_t cudaMallocHost ( void** ptr, size_t size, unsigned int  flags )
[C++ API] Allocates page-locked memory on the host
Parameters
ptr
- Pointer to allocated host memory
size
- Requested allocation size in bytes
flags
- Requested properties of allocated memory
Description

Allocates size bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cudaMemcpy(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory obtained with functions such as malloc(). Allocating excessive amounts of pinned memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

The flags parameter enables different options to be specified that affect the allocation, as follows.

  • cudaHostAllocDefault: This flag's value is defined to be 0.

  • cudaHostAllocPortable: The memory returned by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the allocation.

  • cudaHostAllocMapped: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cudaHostGetDevicePointer().

  • cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC). WC memory can be transferred across the PCI Express bus more quickly on some system configurations, but cannot be read efficiently by most CPUs. WC memory is a good option for buffers that will be written by the CPU and read by the device via mapped pinned memory or host->device transfers.

All of these flags are orthogonal to one another: a developer may allocate memory that is portable, mapped and/or write-combined with no restrictions.

cudaSetDeviceFlags() must have been called with the cudaDeviceMapHost flag in order for the cudaHostAllocMapped flag to have any effect.

The cudaHostAllocMapped flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cudaHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the cudaHostAllocPortable flag.

Memory allocated by this function must be freed with cudaFreeHost().
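
A minimal sketch (error checks omitted), assuming cudaSetDeviceFlags(cudaDeviceMapHost) has already been called so that cudaHostAllocMapped takes effect:

    void usePinned(size_t bytes)
    {
        float* h_buf = nullptr;
        cudaMallocHost((void**)&h_buf, bytes,
                       cudaHostAllocMapped | cudaHostAllocPortable);

        float* d_alias = nullptr;
        cudaHostGetDevicePointer((void**)&d_alias, h_buf, 0);
        /* ... write h_buf on the host, read d_alias in kernels ... */
        cudaFreeHost(h_buf);
    }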

Note:

See also:

cudaSetDeviceFlags, cudaMallocHost ( C API), cudaFreeHost, cudaHostAlloc

template < class T >

__host__cudaError_t cudaMallocManaged ( T** devPtr, size_t size, unsigned int  flags = cudaMemAttachGlobal ) [inline]
Allocates memory that will be automatically managed by the Unified Memory system.
Parameters
devPtr
- Pointer to allocated device memory
size
- Requested allocation size in bytes
flags
- Must be either cudaMemAttachGlobal or cudaMemAttachHost (defaults to cudaMemAttachGlobal)
Description

Allocates size bytes of managed memory on the device and returns in *devPtr a pointer to the allocated memory. If the device doesn't support allocating managed memory, cudaErrorNotSupported is returned. Support for managed memory can be queried using the device attribute cudaDevAttrManagedMemory. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. If size is 0, cudaMallocManaged returns cudaErrorInvalidValue. The pointer is valid on the CPU and on all GPUs in the system that support managed memory. All accesses to this pointer must obey the Unified Memory programming model.

flags specifies the default stream association for this allocation. flags must be one of cudaMemAttachGlobal or cudaMemAttachHost. The default value for flags is cudaMemAttachGlobal. If cudaMemAttachGlobal is specified, then this memory is accessible from any stream on any device. If cudaMemAttachHost is specified, then the allocation should not be accessed from devices that have a zero value for the device attribute cudaDevAttrConcurrentManagedAccess; an explicit call to cudaStreamAttachMemAsync will be required to enable access on such devices.

If the association is later changed via cudaStreamAttachMemAsync to a single stream, the default association, as specified during cudaMallocManaged, is restored when that stream is destroyed. For __managed__ variables, the default association is always cudaMemAttachGlobal. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won't happen until all work in the stream has completed.

Memory allocated with cudaMallocManaged should be released with cudaFree.

Device memory oversubscription is possible for GPUs that have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Managed memory on such GPUs may be evicted from device memory to host memory at any time by the Unified Memory driver in order to make room for other allocations.

In a multi-GPU system where all GPUs have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this API returns and instead may be populated on access. In such systems, managed memory can migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to maintain data locality and prevent excessive page faults to the extent possible. The application can also guide the driver about memory usage patterns via cudaMemAdvise. The application can also explicitly migrate memory to a desired processor's memory via cudaMemPrefetchAsync.

In a multi-GPU system where all of the GPUs have a zero value for the device attribute cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support with each other, the physical storage for managed memory is created on the GPU which is active at the time cudaMallocManaged is called. All other GPUs will reference the data at reduced bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate memory among such GPUs.

In a multi-GPU system where not all GPUs have peer-to-peer support with each other and where the value of the device attribute cudaDevAttrConcurrentManagedAccess is zero for at least one of those GPUs, the location chosen for physical storage of managed memory is system-dependent.

  • On Linux, the location chosen will be device memory as long as the current set of active contexts are on devices that either have peer-to-peer support with each other or have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. If there is an active context on a GPU that does not have a non-zero value for that device attribute and it does not have peer-to-peer support with the other devices that have active contexts on them, then the location for physical storage will be 'zero-copy' or host memory. Note that this means that managed memory that is located in device memory is migrated to host memory if a new context is created on a GPU that doesn't have a non-zero value for the device attribute and does not support peer-to-peer with at least one of the other devices that has an active context. This in turn implies that context creation may fail if there is insufficient host memory to migrate all managed allocations.

  • On Windows, the physical storage is always created in 'zero-copy' or host memory. All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to restrict CUDA to only use those GPUs that have peer-to-peer support. Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero value to force the driver to always use device memory for physical storage. When this environment variable is set to a non-zero value, all devices used in that process that support managed memory have to be peer-to-peer compatible with each other. The error cudaErrorInvalidDevice will be returned if a device that supports managed memory is used and it is not peer-to-peer compatible with any of the other managed memory supporting devices that were previously used in that process, even if cudaDeviceReset has been called on those devices. These environment variables are described in the CUDA programming guide under the "CUDA environment variables" section.

  • On ARM, managed memory is not available on the discrete GPU with Drive PX-2.
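
A minimal end-to-end sketch (increment and useManaged are illustrative names; error checks omitted):

    __global__ void increment(int* data, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) data[i] += 1;
    }

    void useManaged(int n)
    {
        int* data = nullptr;
        cudaMallocManaged(&data, n * sizeof(int));   // cudaMemAttachGlobal by default

        for (int i = 0; i < n; ++i) data[i] = i;     // direct CPU writes

        increment<<<(n + 255) / 256, 256>>>(data, n);
        cudaDeviceSynchronize();                     // synchronize before CPU reads

        /* data[i] == i + 1 on the host here */
        cudaFree(data);
    }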

Note:

See also:

cudaMallocPitch, cudaFree, cudaMallocArray, cudaFreeArray, cudaMalloc3D, cudaMalloc3DArray, cudaMallocHost ( C API), cudaFreeHost, cudaHostAlloc, cudaDeviceGetAttribute, cudaStreamAttachMemAsync

template < class T >

__host__cudaError_t cudaMemcpyFromSymbol ( void* dst, const T& symbol, size_t count, size_t offset = 0, cudaMemcpyKind kind = cudaMemcpyDeviceToHost ) [inline]
[C++ API] Copies data from the given symbol on the device
Parameters
dst
- Destination memory address
symbol
- Device symbol reference
count
- Size in bytes to copy
offset
- Offset from start of symbol in bytes
kind
- Type of transfer
Description

Copies count bytes from the memory area offset bytes from the start of symbol symbol to the memory area pointed to by dst. The memory areas may not overlap. symbol is a variable that resides in global or constant memory space. kind can be either cudaMemcpyDeviceToHost or cudaMemcpyDeviceToDevice.
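
A minimal sketch (resultCount is an illustrative __device__ variable):

    __device__ int resultCount;

    void readBack()
    {
        int host = 0;
        cudaMemcpyFromSymbol(&host, resultCount, sizeof(int));
        /* host now holds the device-side value of resultCount */
    }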

Note:
  • Note that this function may also return error codes from previous, asynchronous launches.

  • This function exhibits synchronous behavior for most use cases.

  • Use of a string naming a variable as the symbol parameter was deprecated in CUDA 4.1 and removed in CUDA 5.0.

  • Note that this function may also return cudaErrorInitializationError, cudaErrorInsufficientDriver or cudaErrorNoDevice if this call tries to initialize internal CUDA RT state.

  • Note that as specified by cudaStreamAddCallback no CUDA function may be called from callback. cudaErrorNotPermitted may, but is not guaranteed to, be returned as a diagnostic in such case.

See also:

cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray, cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync

template < class T >

__host__cudaError_t cudaMemcpyFromSymbolAsync ( void* dst, const T& symbol, size_t count, size_t offset = 0, cudaMemcpyKind kind = cudaMemcpyDeviceToHost, cudaStream_t stream = 0 ) [inline]
[C++ API] Copies data from the given symbol on the device
Parameters
dst
- Destination memory address
symbol
- Device symbol reference
count
- Size in bytes to copy
offset
- Offset from start of symbol in bytes
kind
- Type of transfer
stream
- Stream identifier
Description

Copies count bytes from the memory area offset bytes from the start of symbol symbol to the memory area pointed to by dst. The memory areas may not overlap. symbol is a variable that resides in global or constant memory space. kind can be either cudaMemcpyDeviceToHost or cudaMemcpyDeviceToDevice.

cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so the call may return before the copy is complete. The copy can optionally be associated to a stream by passing a non-zero stream argument. If kind is cudaMemcpyDeviceToHost and stream is non-zero, the copy may overlap with operations in other streams.

Note:
  • Note that this function may also return error codes from previous, asynchronous launches.

  • This function exhibits asynchronous behavior for most use cases.

  • Use of a string naming a variable as the symbol parameter was deprecated in CUDA 4.1 and removed in CUDA 5.0.

  • Note that this function may also return cudaErrorInitializationError, cudaErrorInsufficientDriver or cudaErrorNoDevice if this call tries to initialize internal CUDA RT state.

  • Note that as specified by cudaStreamAddCallback no CUDA function may be called from callback. cudaErrorNotPermitted may, but is not guaranteed to, be returned as a diagnostic in such case.

See also:

cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray, cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync

template < class T >

__host__cudaError_t cudaMemcpyToSymbol ( const T& symbol, const void* src, size_t count, size_t offset = 0, cudaMemcpyKind kind = cudaMemcpyHostToDevice ) [inline]
[C++ API] Copies data to the given symbol on the device
Parameters
symbol
- Device symbol reference
src
- Source memory address
count
- Size in bytes to copy
offset
- Offset from start of symbol in bytes
kind
- Type of transfer
Description

Copies count bytes from the memory area pointed to by src to the memory area offset bytes from the start of symbol symbol. The memory areas may not overlap. symbol is a variable that resides in global or constant memory space. kind can be either cudaMemcpyHostToDevice or cudaMemcpyDeviceToDevice.
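
A minimal sketch (coeffs is an illustrative __constant__ array):

    __constant__ float coeffs[16];

    void uploadCoeffs(const float* hostCoeffs)
    {
        cudaMemcpyToSymbol(coeffs, hostCoeffs, 16 * sizeof(float));
    }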

Note:
  • Note that this function may also return error codes from previous, asynchronous launches.

  • This function exhibits synchronous behavior for most use cases.

  • Use of a string naming a variable as the symbol parameter was deprecated in CUDA 4.1 and removed in CUDA 5.0.

  • Note that this function may also return cudaErrorInitializationError, cudaErrorInsufficientDriver or cudaErrorNoDevice if this call tries to initialize internal CUDA RT state.

  • Note that as specified by cudaStreamAddCallback no CUDA function may be called from callback. cudaErrorNotPermitted may, but is not guaranteed to, be returned as a diagnostic in such case.

See also:

cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray, cudaMemcpy2DArrayToArray, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync

template < class T >

__host__cudaError_t cudaMemcpyToSymbolAsync ( const T& symbol, const void* src, size_t count, size_t offset = 0, cudaMemcpyKind kind = cudaMemcpyHostToDevice, cudaStream_t stream = 0 ) [inline]
[C++ API] Copies data to the given symbol on the device
Parameters
symbol
- Device symbol reference
src
- Source memory address
count
- Size in bytes to copy
offset
- Offset from start of symbol in bytes
kind
- Type of transfer
stream
- Stream identifier
Description

Copies count bytes from the memory area pointed to by src to the memory area offset bytes from the start of symbol symbol. The memory areas may not overlap. symbol is a variable that resides in global or constant memory space. kind can be either cudaMemcpyHostToDevice or cudaMemcpyDeviceToDevice.

cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so the call may return before the copy is complete. The copy can optionally be associated to a stream by passing a non-zero stream argument. If kind is cudaMemcpyHostToDevice and stream is non-zero, the copy may overlap with operations in other streams.

Note:
  • Note that this function may also return error codes from previous, asynchronous launches.

  • This function exhibits asynchronous behavior for most use cases.

  • Use of a string naming a variable as the symbol parameter was deprecated in CUDA 4.1 and removed in CUDA 5.0.

  • Note that this function may also return cudaErrorInitializationError, cudaErrorInsufficientDriver or cudaErrorNoDevice if this call tries to initialize internal CUDA RT state.

  • Note that as specified by cudaStreamAddCallback no CUDA function may be called from callback. cudaErrorNotPermitted may, but is not guaranteed to, be returned as a diagnostic in such case.

See also:

cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray, cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyFromSymbolAsync

template < class T >

__host__cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor ( int* numBlocks, T func, int  blockSize, size_t dynamicSMemSize ) [inline]
Returns occupancy for a device function.
Parameters
numBlocks
- Returned occupancy
func
- Kernel function for which occupancy is calculated
blockSize
- Block size the kernel is intended to be launched with
dynamicSMemSize
- Per-block dynamic shared memory usage intended, in bytes
Description

Returns in *numBlocks the maximum number of active blocks per streaming multiprocessor for the device function.
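
A minimal sketch (work is an illustrative kernel) that converts the returned block count into a theoretical occupancy fraction for device 0:

    #include <cstdio>

    __global__ void work(float* data) { /* ... */ }

    void reportOccupancy()
    {
        int numBlocks = 0;
        const int blockSize = 256;
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, work,
                                                      blockSize, 0);

        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, 0);
        float occupancy = float(numBlocks * blockSize) /
                          prop.maxThreadsPerMultiProcessor;
        printf("theoretical occupancy: %.2f\n", occupancy);
    }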

Note:

See also:

cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags

cudaOccupancyMaxPotentialBlockSize

cudaOccupancyMaxPotentialBlockSizeWithFlags

cudaOccupancyMaxPotentialBlockSizeVariableSMem

cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags

template < class T >

__host__cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags ( int* numBlocks, T func, int  blockSize, size_t dynamicSMemSize, unsigned int  flags ) [inline]
Returns occupancy for a device function with the specified flags.
Parameters
numBlocks
- Returned occupancy
func
- Kernel function for which occupancy is calculated
blockSize
- Block size the kernel is intended to be launched with
dynamicSMemSize
- Per-block dynamic shared memory usage intended, in bytes
flags
- Requested behavior for the occupancy calculator
Description

Returns in *numBlocks the maximum number of active blocks per streaming multiprocessor for the device function.

The flags parameter controls how special cases are handled. Valid flags include:

  • cudaOccupancyDisableCachingOverride: suppresses the default behavior on platforms where global caching affects occupancy. On such platforms, if caching is enabled, but per-block SM resource usage would result in zero occupancy, the occupancy calculator calculates the occupancy as if caching is disabled. Setting this flag makes the occupancy calculator return 0 in such cases. More information about this feature can be found in the "Unified L1/Texture Cache" section of the Maxwell tuning guide.

Note:

See also:

cudaOccupancyMaxActiveBlocksPerMultiprocessor

cudaOccupancyMaxPotentialBlockSize

cudaOccupancyMaxPotentialBlockSizeWithFlags

cudaOccupancyMaxPotentialBlockSizeVariableSMem

cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags

template < class T >

__host__cudaError_t cudaOccupancyMaxPotentialBlockSize ( int* minGridSize, int* blockSize, T func, size_t dynamicSMemSize = 0, int  blockSizeLimit = 0 ) [inline]
Returns grid and block size that achieves maximum potential occupancy for a device function.
Parameters
minGridSize
- Returned minimum grid size needed to achieve the best potential occupancy
blockSize
- Returned block size
func
- Device function symbol
dynamicSMemSize
- Per-block dynamic shared memory usage intended, in bytes
blockSizeLimit
- The maximum block size func is designed to work with. 0 means no limit.
Description

Returns in *minGridSize and *blockSize a suggested grid / block size pair that achieves the best potential occupancy (i.e. the maximum number of active warps with the smallest number of blocks).

Use cudaOccupancyMaxPotentialBlockSizeVariableSMem if the amount of per-block dynamic shared memory changes with different block sizes.
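
A minimal sketch (work and launchAtMaxOccupancy are illustrative names) that launches with the suggested configuration:

    __global__ void work(float* data, int n) { /* ... */ }

    void launchAtMaxOccupancy(float* d_data, int n)
    {
        int minGridSize = 0, blockSize = 0;
        cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, work);

        int gridSize = (n + blockSize - 1) / blockSize;   // cover all n elements
        work<<<gridSize, blockSize>>>(d_data, n);
    }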

Note:

See also:

cudaOccupancyMaxPotentialBlockSizeWithFlags

cudaOccupancyMaxActiveBlocksPerMultiprocessor

cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags

cudaOccupancyMaxPotentialBlockSizeVariableSMem

cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags

template < typename UnaryFunction, class T >

__host__cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMem ( int* minGridSize, int* blockSize, T func, UnaryFunction blockSizeToDynamicSMemSize, int  blockSizeLimit = 0 ) [inline]
Returns grid and block size that achieves maximum potential occupancy for a device function.
Parameters
minGridSize
- Returned minimum grid size needed to achieve the best potential occupancy
blockSize
- Returned block size
func
- Device function symbol
blockSizeToDynamicSMemSize
- A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block
blockSizeLimit
- The maximum block size func is designed to work with. 0 means no limit.
Description

Returns in *minGridSize and *blockSize a suggested grid / block size pair that achieves the best potential occupancy (i.e. the maximum number of active warps with the smallest number of blocks).
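
A minimal sketch where dynamic shared memory scales with block size (one float per thread, as an illustrative functor):

    __global__ void reduce(float* data, int n)
    {
        extern __shared__ float s[];   // one float per thread
        /* ... */
    }

    void pickBlockSize()
    {
        int minGridSize = 0, blockSize = 0;
        auto smemForBlock = [](int block) { return block * sizeof(float); };
        cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize,
                                                       reduce, smemForBlock);
    }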

Note:

See also:

cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags

cudaOccupancyMaxActiveBlocksPerMultiprocessor

cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags

cudaOccupancyMaxPotentialBlockSize

cudaOccupancyMaxPotentialBlockSizeWithFlags

template < typename UnaryFunction, class T >

__host__cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags ( int* minGridSize, int* blockSize, T func, UnaryFunction blockSizeToDynamicSMemSize, int  blockSizeLimit = 0, unsigned int  flags = 0 ) [inline]
Returns grid and block size that achieves maximum potential occupancy for a device function.
Parameters
minGridSize
- Returned minimum grid size needed to achieve the best potential occupancy
blockSize
- Returned block size
func
- Device function symbol
blockSizeToDynamicSMemSize
- A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block
blockSizeLimit
- The maximum block size func is designed to work with. 0 means no limit.
flags
- Requested behavior for the occupancy calculator
Description

Returns in *minGridSize and *blockSize a suggested grid / block size pair that achieves the best potential occupancy (i.e. the maximum number of active warps with the smallest number of blocks).

The flags parameter controls how special cases are handled. Valid flags include:

  • cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior on platforms where global caching affects occupancy. On such platforms, if caching is enabled, but per-block SM resource usage would result in zero occupancy, the occupancy calculator calculates the occupancy as if caching is disabled. Setting this flag makes the occupancy calculator return 0 in such cases. More information about this feature can be found in the "Unified L1/Texture Cache" section of the Maxwell tuning guide.

Note:

See also:

cudaOccupancyMaxPotentialBlockSizeVariableSMem

cudaOccupancyMaxActiveBlocksPerMultiprocessor

cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags

cudaOccupancyMaxPotentialBlockSize

cudaOccupancyMaxPotentialBlockSizeWithFlags

template < class T >

__host__cudaError_t cudaOccupancyMaxPotentialBlockSizeWithFlags ( int* minGridSize, int* blockSize, T func, size_t dynamicSMemSize = 0, int  blockSizeLimit = 0, unsigned int  flags = 0 ) [inline]
Returns grid and block size that achieves maximum potential occupancy for a device function with the specified flags.
Parameters
minGridSize
- Returned minimum grid size needed to achieve the best potential occupancy
blockSize
- Returned block size
func
- Device function symbol
dynamicSMemSize
- Per-block dynamic shared memory usage intended, in bytes
blockSizeLimit
- The maximum block size func is designed to work with. 0 means no limit.
flags
- Requested behavior for the occupancy calculator
Description

Returns in *minGridSize and *blockSize a suggested grid / block size pair that achieves the best potential occupancy (i.e. the maximum number of active warps with the smallest number of blocks).

The flags parameter controls how special cases are handled. Valid flags include:

  • cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior on platforms where global caching affects occupancy. On such platforms, if caching is enabled, but per-block SM resource usage would result in zero occupancy, the occupancy calculator calculates the occupancy as if caching is disabled. Setting this flag makes the occupancy calculator return 0 in such cases. More information about this feature can be found in the "Unified L1/Texture Cache" section of the Maxwell tuning guide.

Use cudaOccupancyMaxPotentialBlockSizeVariableSMem if the amount of per-block dynamic shared memory changes with different block sizes.

Note:

See also:

cudaOccupancyMaxPotentialBlockSize

cudaOccupancyMaxActiveBlocksPerMultiprocessor

cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags

cudaOccupancyMaxPotentialBlockSizeVariableSMem

cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags

template < class T >

__host__cudaError_t cudaStreamAttachMemAsync ( cudaStream_t stream, T* devPtr, size_t length = 0, unsigned int  flags = cudaMemAttachSingle ) [inline]
Attach memory to a stream asynchronously.
Parameters
stream
- Stream in which to enqueue the attach operation
devPtr
- Pointer to memory (must be a pointer to managed memory or to a valid host-accessible region of system-allocated memory)
length
- Length of memory (defaults to zero)
flags
- Must be one of cudaMemAttachGlobal, cudaMemAttachHost or cudaMemAttachSingle (defaults to cudaMemAttachSingle)
Description

Enqueues an operation in stream to specify stream association of length bytes of memory starting from devPtr. This function is a stream-ordered operation, meaning that it is dependent on, and will only take effect when, previous work in stream has completed. Any previous association is automatically replaced.

devPtr must point to one of the following types of memory:

  • managed memory declared using the __managed__ keyword or allocated with cudaMallocManaged.

  • a valid host-accessible region of system-allocated pageable memory. This type of memory may only be specified if the device associated with the stream reports a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess.

For managed allocations, length must be either zero or the entire allocation's size. Both indicate that the entire allocation's stream association is being changed. Currently, it is not possible to change stream association for a portion of a managed allocation.

For pageable allocations, length must be non-zero.

The stream association is specified using flags which must be one of cudaMemAttachGlobal, cudaMemAttachHost or cudaMemAttachSingle. The default value for flags is cudaMemAttachSingle. If the cudaMemAttachGlobal flag is specified, the memory can be accessed by any stream on any device. If the cudaMemAttachHost flag is specified, the program makes a guarantee that it won't access the memory on the device from any stream on a device that has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess. If the cudaMemAttachSingle flag is specified and stream is associated with a device that has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess, the program makes a guarantee that it will only access the memory on the device from stream. It is illegal to attach singly to the NULL stream, because the NULL stream is a virtual global stream and not a specific stream. An error will be returned in this case.

When memory is associated with a single stream, the Unified Memory system will allow CPU access to this memory region so long as all operations in stream have completed, regardless of whether other streams are active. In effect, this constrains exclusive ownership of the managed memory region by an active GPU to per-stream activity instead of whole-GPU activity.

Accessing memory on the device from streams that are not associated with it will produce undefined results. No error checking is performed by the Unified Memory system to ensure that kernels launched into other streams do not access this region.

It is a program's responsibility to order calls to cudaStreamAttachMemAsync via events, synchronization or other means to ensure legal access to memory at all times. Data visibility and coherency will be changed appropriately for all kernels which follow a stream-association change.

If stream is destroyed while data is associated with it, the association is removed and the association reverts to the default visibility of the allocation as specified at cudaMallocManaged. For __managed__ variables, the default association is always cudaMemAttachGlobal. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won't happen until all work in the stream has completed.
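
A minimal sketch (attachToStream is an illustrative name), assuming a device with a zero value for cudaDevAttrConcurrentManagedAccess, where single-stream attachment is what enables concurrent CPU access:

    void attachToStream(cudaStream_t stream, size_t bytes)
    {
        float* data = nullptr;
        cudaMallocManaged(&data, bytes, cudaMemAttachHost);

        cudaStreamAttachMemAsync(stream, data);   // length 0 = whole allocation;
                                                  // flags default to cudaMemAttachSingle
        cudaStreamSynchronize(stream);            // attach takes effect in stream order

        data[0] = 1.0f;   // CPU access legal while stream has no pending work
    }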

Note:

See also:

cudaStreamCreate, cudaStreamCreateWithFlags, cudaStreamWaitEvent, cudaStreamSynchronize, cudaStreamAddCallback, cudaStreamDestroy, cudaMallocManaged

template < class T, int dim, enum cudaTextureReadMode readMode >

__host__cudaError_t cudaUnbindTexture ( const texture < T, dim, readMode > & tex ) [inline]
[C++ API] Unbinds a texture
Parameters
tex
- Texture to unbind
Description

Unbinds the texture bound to tex. If tex is not currently bound, no operation is performed.

Note:

See also:

cudaCreateChannelDesc ( C++ API), cudaGetChannelDesc, cudaGetTextureReference, cudaBindTexture ( C++ API), cudaBindTexture ( C++ API, inherited channel descriptor), cudaBindTexture2D ( C++ API), cudaBindTexture2D ( C++ API, inherited channel descriptor), cudaBindTextureToArray ( C++ API), cudaBindTextureToArray ( C++ API, inherited channel descriptor), cudaUnbindTexture ( C API), cudaGetTextureAlignmentOffset ( C++ API)