hashcat/OpenCL/inc_vendor.h

/**
* Author......: See docs/credits.txt
* License.....: MIT
*/
/**
* device type
*/
#define DEVICE_TYPE_CPU 2
#define DEVICE_TYPE_GPU 4
#define DEVICE_TYPE_ACCEL 8
#if DEVICE_TYPE == DEVICE_TYPE_CPU
#define IS_CPU
#elif DEVICE_TYPE == DEVICE_TYPE_GPU
#define IS_GPU
#elif DEVICE_TYPE == DEVICE_TYPE_ACCEL
#define IS_ACCEL
#endif
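/*
* Illustrative sketch (not part of the original header): DEVICE_TYPE is not
* defined in this file; it is assumed to be supplied by the host as an OpenCL
* build option when the kernel source is compiled, e.g.:
*
*   clBuildProgram (program, 1, &device, "-D DEVICE_TYPE=4", NULL, NULL);
*
* With DEVICE_TYPE=4 (DEVICE_TYPE_GPU) the block above defines IS_GPU, which
* later sections use to select device-specific code paths.
*/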
/**
* vendor specific
*/
#if VENDOR_ID == (1 << 0)
#define IS_AMD
#elif VENDOR_ID == (1 << 1)
#define IS_APPLE
#define IS_GENERIC
#elif VENDOR_ID == (1 << 2)
#define IS_INTEL_BEIGNET
#define IS_GENERIC
#elif VENDOR_ID == (1 << 3)
#define IS_INTEL_SDK
#define IS_GENERIC
#elif VENDOR_ID == (1 << 4)
#define IS_MESA
#define IS_GENERIC
#elif VENDOR_ID == (1 << 5)
#define IS_NV
#elif VENDOR_ID == (1 << 6)
#define IS_POCL
#define IS_GENERIC
#else
#define IS_GENERIC
#endif
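/*
* Illustrative sketch (not part of the original header): VENDOR_ID is likewise
* assumed to arrive as a host-side build option, e.g. "-D VENDOR_ID=8" matches
* (1 << 3) and defines IS_INTEL_SDK together with IS_GENERIC. Kernel code can
* then branch on the resulting vendor macros:
*
*   #ifdef IS_NV
*   // NVIDIA-specific tuning or intrinsics
*   #else
*   // portable fallback
*   #endif
*/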
#define LOCAL_MEM_TYPE_LOCAL 1
#define LOCAL_MEM_TYPE_GLOBAL 2
#if LOCAL_MEM_TYPE == LOCAL_MEM_TYPE_LOCAL
#define REAL_SHM
#endif
#ifdef REAL_SHM
#define SHM_TYPE __local
#else
#define SHM_TYPE __constant
#endif
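/*
* Illustrative sketch (not part of the original header), using a hypothetical
* __constant source table c_table: on devices with real shared memory the table
* is staged into __local storage, otherwise it is read from __constant directly.
* SHM_TYPE then gives helper functions a pointer qualifier that is valid in both
* cases (e.g. a parameter declared as SHM_TYPE const uint *s_table):
*
*   #ifdef REAL_SHM
*   __local uint s_table[256];
*   for (uint i = get_local_id (0); i < 256; i += get_local_size (0)) s_table[i] = c_table[i];
*   barrier (CLK_LOCAL_MEM_FENCE);
*   #else
*   __constant uint *s_table = c_table;
*   #endif
*/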
/**
* Function declarations can have a large influence on performance, depending on the OpenCL runtime
*/
#if defined IS_CPU
#define DECLSPEC inline
#elif defined IS_GPU
#if defined IS_AMD
#define DECLSPEC inline
#else
#define DECLSPEC
#endif
#else
#define DECLSPEC
#endif
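/*
* Illustrative sketch (not part of the original header): DECLSPEC is intended to
* be placed in front of helper function definitions, so CPU devices (and AMD
* GPUs) get an explicit inline hint while other runtimes keep their defaults.
* A hypothetical helper would look like:
*
*   DECLSPEC uint rotl32_example (const uint a, const uint n)
*   {
*     return rotate (a, n);
*   }
*/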
/**
* AMD specific
*/
#ifdef IS_AMD
#if defined(cl_amd_media_ops)
#pragma OPENCL EXTENSION cl_amd_media_ops : enable
#endif
#if defined(cl_amd_media_ops2)
#pragma OPENCL EXTENSION cl_amd_media_ops2 : enable
#endif
#endif
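/*
* Illustrative sketch (not part of the original header): with cl_amd_media_ops
* enabled, intrinsics such as amd_bitalign () can be used behind the IS_AMD
* guard, e.g. as a rotate-left built from a 64-bit funnel shift:
*
*   #ifdef IS_AMD
*   r = amd_bitalign (a, a, 32 - n);
*   #else
*   r = rotate (a, n);
*   #endif
*/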
/**
* Unrolling is generally enabled for all device types and hash modes.
* There are a few exceptions where it is better not to unroll:
* some algorithms run into too much register pressure due to loop unrolling.
*/
// Generic vendors: these algorithms have been shown to produce better results on both AMD and NV when not unrolled,
// so we can assume they will produce better results on other vendors as well.
#ifdef NO_UNROLL
#undef _unroll
#endif
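/*
* Illustrative sketch (not part of the original header): _unroll is assumed to
* be consumed by kernel loops roughly like this, so that the #undef above turns
* loop unrolling off for the affected algorithms:
*
*   #ifdef _unroll
*   #pragma unroll
*   #endif
*   for (int i = 0; i < 64; i++)
*   {
*     ...
*   }
*/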