Diffstat (limited to 'compat')
33 files changed, 7321 insertions, 122 deletions
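Among the changes below, the compat stdatomic shims (dummy, gcc, pthread, suncc, win32) are corrected so that atomic_fetch_xor and atomic_fetch_xor_explicit forward to an XOR operation rather than a subtraction. As a minimal standalone sketch of the intended behaviour, using only the standard C11 <stdatomic.h> interface that these compat headers emulate (not the shims themselves):

/* check_xor.c - illustrates the semantics the corrected mapping must provide */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int v = ATOMIC_VAR_INIT(0xF0);

    /* atomic_fetch_xor_explicit returns the previous value (0xF0)
     * and stores 0xF0 ^ 0x0F = 0xFF; a fetch-and-sub would instead
     * leave 0xE1, which is the bug being fixed in the shims below. */
    int old = atomic_fetch_xor_explicit(&v, 0x0F, memory_order_seq_cst);

    printf("old=0x%X new=0x%X\n", old, atomic_load(&v)); /* old=0xF0 new=0xFF */
    return 0;
}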
diff --git a/compat/aix/math.h b/compat/aix/math.h index 380f878..dee13c8 100644 --- a/compat/aix/math.h +++ b/compat/aix/math.h @@ -2,25 +2,25 @@ * Work around the class() function in AIX math.h clashing with * identifiers named "class". * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#ifndef LIBAV_COMPAT_AIX_MATH_H -#define LIBAV_COMPAT_AIX_MATH_H +#ifndef COMPAT_AIX_MATH_H +#define COMPAT_AIX_MATH_H #define class class_in_math_h_causes_problems @@ -28,4 +28,4 @@ #undef class -#endif /* LIBAV_COMPAT_AIX_MATH_H */ +#endif /* COMPAT_AIX_MATH_H */ diff --git a/compat/atomics/dummy/stdatomic.h b/compat/atomics/dummy/stdatomic.h index 374e1e5..59d85f9 100644 --- a/compat/atomics/dummy/stdatomic.h +++ b/compat/atomics/dummy/stdatomic.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -21,8 +21,8 @@ * Copyright (C) 2010 Rémi Denis-Courmont */ -#ifndef LIBAV_COMPAT_ATOMICS_DUMMY_STDATOMIC_H -#define LIBAV_COMPAT_ATOMICS_DUMMY_STDATOMIC_H +#ifndef COMPAT_ATOMICS_DUMMY_STDATOMIC_H +#define COMPAT_ATOMICS_DUMMY_STDATOMIC_H #include <stdint.h> @@ -156,7 +156,7 @@ FETCH_MODIFY(and, &) atomic_fetch_or(object, operand) #define atomic_fetch_xor_explicit(object, operand, order) \ - atomic_fetch_sub(object, operand) + atomic_fetch_xor(object, operand) #define atomic_fetch_and_explicit(object, operand, order) \ atomic_fetch_and(object, operand) @@ -173,4 +173,4 @@ FETCH_MODIFY(and, &) #define atomic_flag_clear_explicit(object, order) \ atomic_flag_clear(object) -#endif /* LIBAV_COMPAT_ATOMICS_DUMMY_STDATOMIC_H */ +#endif /* COMPAT_ATOMICS_DUMMY_STDATOMIC_H */ diff --git a/compat/atomics/gcc/stdatomic.h b/compat/atomics/gcc/stdatomic.h index 67168ab..e13ed0e 100644 --- a/compat/atomics/gcc/stdatomic.h +++ b/compat/atomics/gcc/stdatomic.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -21,8 +21,8 @@ * Copyright (C) 2010 Rémi Denis-Courmont */ -#ifndef LIBAV_COMPAT_ATOMICS_GCC_STDATOMIC_H -#define LIBAV_COMPAT_ATOMICS_GCC_STDATOMIC_H +#ifndef COMPAT_ATOMICS_GCC_STDATOMIC_H +#define COMPAT_ATOMICS_GCC_STDATOMIC_H #include <stddef.h> #include <stdint.h> @@ -100,8 +100,8 @@ do { \ #define atomic_exchange(object, desired) \ ({ \ - typeof(object) _obj = (object); \ - typeof(*object) _old; \ + __typeof__(object) _obj = (object); \ + __typeof__(*object) _old; \ do \ _old = atomic_load(_obj); \ while (!__sync_bool_compare_and_swap(_obj, _old, (desired))); \ @@ -113,8 +113,8 @@ do { \ #define atomic_compare_exchange_strong(object, expected, desired) \ ({ \ - typeof(object) _exp = (expected); \ - typeof(*object) _old = *_exp; \ + __typeof__(object) _exp = (expected); \ + __typeof__(*object) _old = *_exp; \ *_exp = __sync_val_compare_and_swap((object), _old, (desired)); \ *_exp == _old; \ }) @@ -147,10 +147,10 @@ do { \ atomic_fetch_or(object, operand) #define atomic_fetch_xor(object, operand) \ - __sync_fetch_and_sub(object, operand) + __sync_fetch_and_xor(object, operand) #define atomic_fetch_xor_explicit(object, operand, order) \ - atomic_fetch_sub(object, operand) + atomic_fetch_xor(object, operand) #define atomic_fetch_and(object, operand) \ __sync_fetch_and_and(object, operand) @@ -170,4 +170,4 @@ do { \ #define atomic_flag_clear_explicit(object, order) \ atomic_flag_clear(object) -#endif /* LIBAV_COMPAT_ATOMICS_GCC_STDATOMIC_H */ +#endif /* COMPAT_ATOMICS_GCC_STDATOMIC_H */ diff --git a/compat/atomics/pthread/stdatomic.c b/compat/atomics/pthread/stdatomic.c index 0d1ecfe..9fca989 100644 --- a/compat/atomics/pthread/stdatomic.c +++ b/compat/atomics/pthread/stdatomic.c @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/compat/atomics/pthread/stdatomic.h b/compat/atomics/pthread/stdatomic.h index a4aa9bb..81a60f1 100644 --- a/compat/atomics/pthread/stdatomic.h +++ b/compat/atomics/pthread/stdatomic.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. 
* - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -21,8 +21,8 @@ * Copyright (C) 2010 Rémi Denis-Courmont */ -#ifndef LIBAV_COMPAT_ATOMICS_PTHREAD_STDATOMIC_H -#define LIBAV_COMPAT_ATOMICS_PTHREAD_STDATOMIC_H +#ifndef COMPAT_ATOMICS_PTHREAD_STDATOMIC_H +#define COMPAT_ATOMICS_PTHREAD_STDATOMIC_H #include <stdint.h> @@ -177,7 +177,7 @@ FETCH_MODIFY(and, &) atomic_fetch_or(object, operand) #define atomic_fetch_xor_explicit(object, operand, order) \ - atomic_fetch_sub(object, operand) + atomic_fetch_xor(object, operand) #define atomic_fetch_and_explicit(object, operand, order) \ atomic_fetch_and(object, operand) @@ -194,4 +194,4 @@ FETCH_MODIFY(and, &) #define atomic_flag_clear_explicit(object, order) \ atomic_flag_clear(object) -#endif /* LIBAV_COMPAT_ATOMICS_PTHREAD_STDATOMIC_H */ +#endif /* COMPAT_ATOMICS_PTHREAD_STDATOMIC_H */ diff --git a/compat/atomics/suncc/stdatomic.h b/compat/atomics/suncc/stdatomic.h index 32129aa..4a864a4 100644 --- a/compat/atomics/suncc/stdatomic.h +++ b/compat/atomics/suncc/stdatomic.h @@ -1,23 +1,23 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#ifndef LIBAV_COMPAT_ATOMICS_SUNCC_STDATOMIC_H -#define LIBAV_COMPAT_ATOMICS_SUNCC_STDATOMIC_H +#ifndef COMPAT_ATOMICS_SUNCC_STDATOMIC_H +#define COMPAT_ATOMICS_SUNCC_STDATOMIC_H #include <atomic.h> #include <mbarrier.h> @@ -166,7 +166,7 @@ static inline intptr_t atomic_fetch_and(intptr_t *object, intptr_t operand) atomic_fetch_or(object, operand) #define atomic_fetch_xor_explicit(object, operand, order) \ - atomic_fetch_sub(object, operand) + atomic_fetch_xor(object, operand) #define atomic_fetch_and_explicit(object, operand, order) \ atomic_fetch_and(object, operand) @@ -183,4 +183,4 @@ static inline intptr_t atomic_fetch_and(intptr_t *object, intptr_t operand) #define atomic_flag_clear_explicit(object, order) \ atomic_flag_clear(object) -#endif /* LIBAV_COMPAT_ATOMICS_SUNCC_STDATOMIC_H */ +#endif /* COMPAT_ATOMICS_SUNCC_STDATOMIC_H */ diff --git a/compat/atomics/win32/stdatomic.h b/compat/atomics/win32/stdatomic.h index bdd3933..092f453 100644 --- a/compat/atomics/win32/stdatomic.h +++ b/compat/atomics/win32/stdatomic.h @@ -1,24 +1,25 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#ifndef LIBAV_COMPAT_ATOMICS_WIN32_STDATOMIC_H -#define LIBAV_COMPAT_ATOMICS_WIN32_STDATOMIC_H +#ifndef COMPAT_ATOMICS_WIN32_STDATOMIC_H +#define COMPAT_ATOMICS_WIN32_STDATOMIC_H +#define WIN32_LEAN_AND_MEAN #include <stddef.h> #include <stdint.h> #include <windows.h> @@ -159,7 +160,7 @@ static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *exp atomic_fetch_or(object, operand) #define atomic_fetch_xor_explicit(object, operand, order) \ - atomic_fetch_sub(object, operand) + atomic_fetch_xor(object, operand) #define atomic_fetch_and_explicit(object, operand, order) \ atomic_fetch_and(object, operand) @@ -176,4 +177,4 @@ static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *exp #define atomic_flag_clear_explicit(object, order) \ atomic_flag_clear(object) -#endif /* LIBAV_COMPAT_ATOMICS_WIN32_STDATOMIC_H */ +#endif /* COMPAT_ATOMICS_WIN32_STDATOMIC_H */ diff --git a/compat/avisynth/avisynth_c.h b/compat/avisynth/avisynth_c.h new file mode 100644 index 0000000..605b92a --- /dev/null +++ b/compat/avisynth/avisynth_c.h @@ -0,0 +1,1064 @@ +// Avisynth C Interface Version 0.20 +// Copyright 2003 Kevin Atkinson + +// This program is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 2 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +// MA 02110-1301 USA, or visit +// http://www.gnu.org/copyleft/gpl.html . +// +// As a special exception, I give you permission to link to the +// Avisynth C interface with independent modules that communicate with +// the Avisynth C interface solely through the interfaces defined in +// avisynth_c.h, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting combined work +// under terms of your choice, provided that every copy of the +// combined work is accompanied by a complete copy of the source code +// of the Avisynth C interface and Avisynth itself (with the version +// used to produce the combined work), being distributed under the +// terms of the GNU General Public License plus this exception. An +// independent module is a module which is not derived from or based +// on Avisynth C Interface, such as 3rd-party filters, import and +// export plugins, or graphical user interfaces. + +// NOTE: this is a partial update of the Avisynth C interface to recognize +// new color spaces added in Avisynth 2.60. By no means is this document +// completely Avisynth 2.60 compliant. 
+ +#ifndef __AVISYNTH_C__ +#define __AVISYNTH_C__ + +#include "avs/config.h" +#include "avs/capi.h" +#include "avs/types.h" + + +///////////////////////////////////////////////////////////////////// +// +// Constants +// + +#ifndef __AVISYNTH_6_H__ +enum { AVISYNTH_INTERFACE_VERSION = 6 }; +#endif + +enum {AVS_SAMPLE_INT8 = 1<<0, + AVS_SAMPLE_INT16 = 1<<1, + AVS_SAMPLE_INT24 = 1<<2, + AVS_SAMPLE_INT32 = 1<<3, + AVS_SAMPLE_FLOAT = 1<<4}; + +enum {AVS_PLANAR_Y=1<<0, + AVS_PLANAR_U=1<<1, + AVS_PLANAR_V=1<<2, + AVS_PLANAR_ALIGNED=1<<3, + AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED, + AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED, + AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED, + AVS_PLANAR_A=1<<4, + AVS_PLANAR_R=1<<5, + AVS_PLANAR_G=1<<6, + AVS_PLANAR_B=1<<7, + AVS_PLANAR_A_ALIGNED=AVS_PLANAR_A|AVS_PLANAR_ALIGNED, + AVS_PLANAR_R_ALIGNED=AVS_PLANAR_R|AVS_PLANAR_ALIGNED, + AVS_PLANAR_G_ALIGNED=AVS_PLANAR_G|AVS_PLANAR_ALIGNED, + AVS_PLANAR_B_ALIGNED=AVS_PLANAR_B|AVS_PLANAR_ALIGNED}; + + // Colorspace properties. +enum { + AVS_CS_YUVA = 1 << 27, + AVS_CS_BGR = 1 << 28, + AVS_CS_YUV = 1 << 29, + AVS_CS_INTERLEAVED = 1 << 30, + AVS_CS_PLANAR = 1 << 31, + + AVS_CS_SHIFT_SUB_WIDTH = 0, + AVS_CS_SHIFT_SUB_HEIGHT = 8, + AVS_CS_SHIFT_SAMPLE_BITS = 16, + + AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH, + AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24 + AVS_CS_SUB_WIDTH_2 = 0 << AVS_CS_SHIFT_SUB_WIDTH, // YV12, I420, YV16 + AVS_CS_SUB_WIDTH_4 = 1 << AVS_CS_SHIFT_SUB_WIDTH, // YUV9, YV411 + + AVS_CS_VPLANEFIRST = 1 << 3, // YV12, YV16, YV24, YV411, YUV9 + AVS_CS_UPLANEFIRST = 1 << 4, // I420 + + AVS_CS_SUB_HEIGHT_MASK = 7 << AVS_CS_SHIFT_SUB_HEIGHT, + AVS_CS_SUB_HEIGHT_1 = 3 << AVS_CS_SHIFT_SUB_HEIGHT, // YV16, YV24, YV411 + AVS_CS_SUB_HEIGHT_2 = 0 << AVS_CS_SHIFT_SUB_HEIGHT, // YV12, I420 + AVS_CS_SUB_HEIGHT_4 = 1 << AVS_CS_SHIFT_SUB_HEIGHT, // YUV9 + + AVS_CS_SAMPLE_BITS_MASK = 7 << AVS_CS_SHIFT_SAMPLE_BITS, + AVS_CS_SAMPLE_BITS_8 = 0 << AVS_CS_SHIFT_SAMPLE_BITS, + AVS_CS_SAMPLE_BITS_10 = 5 << AVS_CS_SHIFT_SAMPLE_BITS, + AVS_CS_SAMPLE_BITS_12 = 6 << AVS_CS_SHIFT_SAMPLE_BITS, + AVS_CS_SAMPLE_BITS_14 = 7 << AVS_CS_SHIFT_SAMPLE_BITS, + AVS_CS_SAMPLE_BITS_16 = 1 << AVS_CS_SHIFT_SAMPLE_BITS, + AVS_CS_SAMPLE_BITS_32 = 2 << AVS_CS_SHIFT_SAMPLE_BITS, + + AVS_CS_PLANAR_MASK = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_BGR | AVS_CS_YUVA | AVS_CS_SAMPLE_BITS_MASK | AVS_CS_SUB_HEIGHT_MASK | AVS_CS_SUB_WIDTH_MASK, + AVS_CS_PLANAR_FILTER = ~(AVS_CS_VPLANEFIRST | AVS_CS_UPLANEFIRST), + + AVS_CS_RGB_TYPE = 1 << 0, + AVS_CS_RGBA_TYPE = 1 << 1, + + AVS_CS_GENERIC_YUV420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // 4:2:0 planar + AVS_CS_GENERIC_YUV422 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // 4:2:2 planar + AVS_CS_GENERIC_YUV444 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1, // 4:4:4 planar + AVS_CS_GENERIC_Y = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV, // Y only (4:0:0) + AVS_CS_GENERIC_RGBP = AVS_CS_PLANAR | AVS_CS_BGR | AVS_CS_RGB_TYPE, // planar RGB + AVS_CS_GENERIC_RGBAP = AVS_CS_PLANAR | AVS_CS_BGR | AVS_CS_RGBA_TYPE, // planar RGBA + AVS_CS_GENERIC_YUVA420 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // 4:2:0:A planar + AVS_CS_GENERIC_YUVA422 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // 4:2:2:A planar + 
AVS_CS_GENERIC_YUVA444 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1 }; // 4:4:4:A planar + + + // Specific colorformats +enum { + AVS_CS_UNKNOWN = 0, + AVS_CS_BGR24 = AVS_CS_RGB_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED, + AVS_CS_BGR32 = AVS_CS_RGBA_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED, + AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED, + // AVS_CS_YV12 = 1<<3 Reserved + // AVS_CS_I420 = 1<<4 Reserved + AVS_CS_RAW32 = 1<<5 | AVS_CS_INTERLEAVED, + + AVS_CS_YV24 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_8, // YVU 4:4:4 planar + AVS_CS_YV16 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_8, // YVU 4:2:2 planar + AVS_CS_YV12 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_8, // YVU 4:2:0 planar + AVS_CS_I420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_UPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YUV 4:2:0 planar + AVS_CS_IYUV = AVS_CS_I420, + AVS_CS_YV411 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:1 planar + AVS_CS_YUV9 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_4 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:0 planar + AVS_CS_Y8 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_8, // Y 4:0:0 planar + + //------------------------- + // AVS16: new planar constants go live! Experimental PF 160613 + // 10-12-14 bit + planar RGB + BRG48/64 160725 + AVS_CS_YUV444P10 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_10, // YUV 4:4:4 10bit samples + AVS_CS_YUV422P10 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_10, // YUV 4:2:2 10bit samples + AVS_CS_YUV420P10 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_10, // YUV 4:2:0 10bit samples + AVS_CS_Y10 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_10, // Y 4:0:0 10bit samples + + AVS_CS_YUV444P12 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_12, // YUV 4:4:4 12bit samples + AVS_CS_YUV422P12 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_12, // YUV 4:2:2 12bit samples + AVS_CS_YUV420P12 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_12, // YUV 4:2:0 12bit samples + AVS_CS_Y12 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_12, // Y 4:0:0 12bit samples + + AVS_CS_YUV444P14 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_14, // YUV 4:4:4 14bit samples + AVS_CS_YUV422P14 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_14, // YUV 4:2:2 14bit samples + AVS_CS_YUV420P14 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_14, // YUV 4:2:0 14bit samples + AVS_CS_Y14 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_14, // Y 4:0:0 14bit samples + + AVS_CS_YUV444P16 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_16, // YUV 4:4:4 16bit samples + AVS_CS_YUV422P16 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_16, // YUV 4:2:2 16bit samples + AVS_CS_YUV420P16 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_16, // YUV 4:2:0 16bit samples + AVS_CS_Y16 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_16, // Y 4:0:0 16bit samples + + // 32 bit samples (float) + AVS_CS_YUV444PS = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_32, // YUV 4:4:4 32bit samples + AVS_CS_YUV422PS = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_32, // YUV 4:2:2 32bit samples + AVS_CS_YUV420PS = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_32, // YUV 4:2:0 32bit samples + AVS_CS_Y32 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_32, // Y 4:0:0 32bit samples + + // RGB packed + AVS_CS_BGR48 = AVS_CS_RGB_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED | AVS_CS_SAMPLE_BITS_16, // BGR 3x16 bit + AVS_CS_BGR64 = AVS_CS_RGBA_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED | AVS_CS_SAMPLE_BITS_16, // BGR 4x16 bit 
+ // no packed 32 bit (float) support for these legacy types + + // RGB planar + AVS_CS_RGBP = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_8, // Planar RGB 8 bit samples + AVS_CS_RGBP10 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_10, // Planar RGB 10bit samples + AVS_CS_RGBP12 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_12, // Planar RGB 12bit samples + AVS_CS_RGBP14 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_14, // Planar RGB 14bit samples + AVS_CS_RGBP16 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_16, // Planar RGB 16bit samples + AVS_CS_RGBPS = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_32, // Planar RGB 32bit samples + + // RGBA planar + AVS_CS_RGBAP = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_8, // Planar RGBA 8 bit samples + AVS_CS_RGBAP10 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_10, // Planar RGBA 10bit samples + AVS_CS_RGBAP12 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_12, // Planar RGBA 12bit samples + AVS_CS_RGBAP14 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_14, // Planar RGBA 14bit samples + AVS_CS_RGBAP16 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_16, // Planar RGBA 16bit samples + AVS_CS_RGBAPS = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_32, // Planar RGBA 32bit samples + + // Planar YUVA + AVS_CS_YUVA444 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:4:4 8bit samples + AVS_CS_YUVA422 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:2:2 8bit samples + AVS_CS_YUVA420 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:2:0 8bit samples + + AVS_CS_YUVA444P10 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:4:4 10bit samples + AVS_CS_YUVA422P10 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:2:2 10bit samples + AVS_CS_YUVA420P10 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:2:0 10bit samples + + AVS_CS_YUVA444P12 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:4:4 12bit samples + AVS_CS_YUVA422P12 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:2:2 12bit samples + AVS_CS_YUVA420P12 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:2:0 12bit samples + + AVS_CS_YUVA444P14 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:4:4 14bit samples + AVS_CS_YUVA422P14 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:2:2 14bit samples + AVS_CS_YUVA420P14 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:2:0 14bit samples + + AVS_CS_YUVA444P16 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:4:4 16bit samples + AVS_CS_YUVA422P16 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:2:2 16bit samples + AVS_CS_YUVA420P16 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:2:0 16bit samples + + AVS_CS_YUVA444PS = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:4:4 32bit samples + AVS_CS_YUVA422PS = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:2:2 32bit samples + AVS_CS_YUVA420PS = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:2:0 32bit samples + +}; + +enum { + AVS_IT_BFF = 1<<0, + AVS_IT_TFF = 1<<1, + AVS_IT_FIELDBASED = 1<<2}; + +enum { + AVS_FILTER_TYPE=1, + AVS_FILTER_INPUT_COLORSPACE=2, + AVS_FILTER_OUTPUT_TYPE=9, + AVS_FILTER_NAME=4, + AVS_FILTER_AUTHOR=5, + AVS_FILTER_VERSION=6, + AVS_FILTER_ARGS=7, + AVS_FILTER_ARGS_INFO=8, + AVS_FILTER_ARGS_DESCRIPTION=10, + AVS_FILTER_DESCRIPTION=11}; + +enum { //SUBTYPES + AVS_FILTER_TYPE_AUDIO=1, + AVS_FILTER_TYPE_VIDEO=2, + AVS_FILTER_OUTPUT_TYPE_SAME=3, + AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4}; + +enum { + // New 2.6 explicitly defined cache hints. 
+ AVS_CACHE_NOTHING=10, // Do not cache video. + AVS_CACHE_WINDOW=11, // Hard protect upto X frames within a range of X from the current frame N. + AVS_CACHE_GENERIC=12, // LRU cache upto X frames. + AVS_CACHE_FORCE_GENERIC=13, // LRU cache upto X frames, override any previous CACHE_WINDOW. + + AVS_CACHE_GET_POLICY=30, // Get the current policy. + AVS_CACHE_GET_WINDOW=31, // Get the current window h_span. + AVS_CACHE_GET_RANGE=32, // Get the current generic frame range. + + AVS_CACHE_AUDIO=50, // Explicitly do cache audio, X byte cache. + AVS_CACHE_AUDIO_NOTHING=51, // Explicitly do not cache audio. + AVS_CACHE_AUDIO_NONE=52, // Audio cache off (auto mode), X byte intial cache. + AVS_CACHE_AUDIO_AUTO=53, // Audio cache on (auto mode), X byte intial cache. + + AVS_CACHE_GET_AUDIO_POLICY=70, // Get the current audio policy. + AVS_CACHE_GET_AUDIO_SIZE=71, // Get the current audio cache size. + + AVS_CACHE_PREFETCH_FRAME=100, // Queue request to prefetch frame N. + AVS_CACHE_PREFETCH_GO=101, // Action video prefetches. + + AVS_CACHE_PREFETCH_AUDIO_BEGIN=120, // Begin queue request transaction to prefetch audio (take critical section). + AVS_CACHE_PREFETCH_AUDIO_STARTLO=121, // Set low 32 bits of start. + AVS_CACHE_PREFETCH_AUDIO_STARTHI=122, // Set high 32 bits of start. + AVS_CACHE_PREFETCH_AUDIO_COUNT=123, // Set low 32 bits of length. + AVS_CACHE_PREFETCH_AUDIO_COMMIT=124, // Enqueue request transaction to prefetch audio (release critical section). + AVS_CACHE_PREFETCH_AUDIO_GO=125, // Action audio prefetches. + + AVS_CACHE_GETCHILD_CACHE_MODE=200, // Cache ask Child for desired video cache mode. + AVS_CACHE_GETCHILD_CACHE_SIZE=201, // Cache ask Child for desired video cache size. + AVS_CACHE_GETCHILD_AUDIO_MODE=202, // Cache ask Child for desired audio cache mode. + AVS_CACHE_GETCHILD_AUDIO_SIZE=203, // Cache ask Child for desired audio cache size. + + AVS_CACHE_GETCHILD_COST=220, // Cache ask Child for estimated processing cost. + AVS_CACHE_COST_ZERO=221, // Child response of zero cost (ptr arithmetic only). + AVS_CACHE_COST_UNIT=222, // Child response of unit cost (less than or equal 1 full frame blit). + AVS_CACHE_COST_LOW=223, // Child response of light cost. (Fast) + AVS_CACHE_COST_MED=224, // Child response of medium cost. (Real time) + AVS_CACHE_COST_HI=225, // Child response of heavy cost. (Slow) + + AVS_CACHE_GETCHILD_THREAD_MODE=240, // Cache ask Child for thread safetyness. + AVS_CACHE_THREAD_UNSAFE=241, // Only 1 thread allowed for all instances. 2.5 filters default! + AVS_CACHE_THREAD_CLASS=242, // Only 1 thread allowed for each instance. 2.6 filters default! + AVS_CACHE_THREAD_SAFE=243, // Allow all threads in any instance. + AVS_CACHE_THREAD_OWN=244, // Safe but limit to 1 thread, internally threaded. + + AVS_CACHE_GETCHILD_ACCESS_COST=260, // Cache ask Child for preferred access pattern. + AVS_CACHE_ACCESS_RAND=261, // Filter is access order agnostic. 
+ AVS_CACHE_ACCESS_SEQ0=262, // Filter prefers sequential access (low cost) + AVS_CACHE_ACCESS_SEQ1=263, // Filter needs sequential access (high cost) + }; + +#ifdef BUILDING_AVSCORE +struct AVS_ScriptEnvironment { + IScriptEnvironment * env; + const char * error; + AVS_ScriptEnvironment(IScriptEnvironment * e = 0) + : env(e), error(0) {} +}; +#endif + +typedef struct AVS_Clip AVS_Clip; +typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment; + +///////////////////////////////////////////////////////////////////// +// +// AVS_VideoInfo +// + +// AVS_VideoInfo is layed out identicly to VideoInfo +typedef struct AVS_VideoInfo { + int width, height; // width=0 means no video + unsigned fps_numerator, fps_denominator; + int num_frames; + + int pixel_type; + + int audio_samples_per_second; // 0 means no audio + int sample_type; + INT64 num_audio_samples; + int nchannels; + + // Imagetype properties + + int image_type; +} AVS_VideoInfo; + +// useful functions of the above +AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p) + { return (p->width!=0); } + +AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p) + { return (p->audio_samples_per_second!=0); } + +AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p) + { return !!(p->pixel_type&AVS_CS_BGR); } + +AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p) + { return ((p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24) && ((p->pixel_type & AVS_CS_SAMPLE_BITS_MASK) == AVS_CS_SAMPLE_BITS_8); } + +AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p) + { return ((p->pixel_type&AVS_CS_BGR32)==AVS_CS_BGR32) && ((p->pixel_type & AVS_CS_SAMPLE_BITS_MASK) == AVS_CS_SAMPLE_BITS_8); } + +AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p) + { return !!(p->pixel_type&AVS_CS_YUV ); } + +AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p) + { return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; } + +AVSC_API(int, avs_is_rgb48)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_rgb64)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yv24)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yv16)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yv12)(const AVS_VideoInfo * p) ; + +AVSC_API(int, avs_is_yv411)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_y8)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yuv444p16)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yuv422p16)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yuv420p16)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_y16)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yuv444ps)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yuv422ps)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yuv420ps)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_y32)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_444)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_422)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_420)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_y)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_yuva)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_planar_rgb)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_is_planar_rgba)(const AVS_VideoInfo * p); + + + +AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property) + { return ((p->image_type & property)==property ); } + +AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p) + { return !!(p->pixel_type & AVS_CS_PLANAR); } + +AVSC_API(int, avs_is_color_space)(const AVS_VideoInfo * p, int c_space); + +AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p) + { return !!(p->image_type & AVS_IT_FIELDBASED); } 
+ +AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p) + { return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); } + +AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p) + { return !!(p->image_type & AVS_IT_BFF); } + +AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p) + { return !!(p->image_type & AVS_IT_TFF); } + +AVSC_API(int, avs_get_plane_width_subsampling)(const AVS_VideoInfo * p, int plane); + +AVSC_API(int, avs_get_plane_height_subsampling)(const AVS_VideoInfo * p, int plane); + + +AVSC_API(int, avs_bits_per_pixel)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_bytes_from_pixels)(const AVS_VideoInfo * p, int pixels); + +AVSC_API(int, avs_row_size)(const AVS_VideoInfo * p, int plane); + +AVSC_API(int, avs_bmp_size)(const AVS_VideoInfo * vi); + +AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p) + { return p->audio_samples_per_second; } + + +AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p) +{ + switch (p->sample_type) { + case AVS_SAMPLE_INT8: return sizeof(signed char); + case AVS_SAMPLE_INT16: return sizeof(signed short); + case AVS_SAMPLE_INT24: return 3; + case AVS_SAMPLE_INT32: return sizeof(signed int); + case AVS_SAMPLE_FLOAT: return sizeof(float); + default: return 0; + } +} +AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p) + { return p->nchannels*avs_bytes_per_channel_sample(p);} + +AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames) + { return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); } + +AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples) + { return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); } + +AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes) + { return bytes / avs_bytes_per_audio_sample(p); } + +AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples) + { return samples * avs_bytes_per_audio_sample(p); } + +AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p) + { return p->nchannels; } + +AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p) + { return p->sample_type;} + +// useful mutator +AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property) + { p->image_type|=property; } + +AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property) + { p->image_type&=~property; } + +AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased) + { if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; } + +AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator) +{ + unsigned x=numerator, y=denominator; + while (y) { // find gcd + unsigned t = x%y; x = y; y = t; + } + p->fps_numerator = numerator/x; + p->fps_denominator = denominator/x; +} + +#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR +AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y) +{ + return (x->pixel_type == y->pixel_type) + || (avs_is_yv12(x) && avs_is_yv12(y)); +} +#endif + +AVSC_API(int, avs_num_components)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_component_size)(const AVS_VideoInfo * p); + +AVSC_API(int, avs_bits_per_component)(const AVS_VideoInfo * p); + +///////////////////////////////////////////////////////////////////// +// +// AVS_VideoFrame +// + +// VideoFrameBuffer holds information about a memory block which is used +// for 
video data. For efficiency, instances of this class are not deleted +// when the refcount reaches zero; instead they're stored in a linked list +// to be reused. The instances are deleted when the corresponding AVS +// file is closed. + +// AVS_VideoFrameBuffer is layed out identicly to VideoFrameBuffer +// DO NOT USE THIS STRUCTURE DIRECTLY +typedef struct AVS_VideoFrameBuffer { + BYTE * data; + int data_size; + // sequence_number is incremented every time the buffer is changed, so + // that stale views can tell they're no longer valid. + volatile long sequence_number; + + volatile long refcount; +} AVS_VideoFrameBuffer; + +// VideoFrame holds a "window" into a VideoFrameBuffer. + +// AVS_VideoFrame is layed out identicly to IVideoFrame +// DO NOT USE THIS STRUCTURE DIRECTLY +typedef struct AVS_VideoFrame { + volatile long refcount; + AVS_VideoFrameBuffer * vfb; + int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture. + int row_sizeUV, heightUV; +} AVS_VideoFrame; + +// Access functions for AVS_VideoFrame +AVSC_API(int, avs_get_pitch_p)(const AVS_VideoFrame * p, int plane); + +#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR +AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) { + return avs_get_pitch_p(p, 0);} +#endif + +AVSC_API(int, avs_get_row_size_p)(const AVS_VideoFrame * p, int plane); + +AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) { + return p->row_size; } + +AVSC_API(int, avs_get_height_p)(const AVS_VideoFrame * p, int plane); + +AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) { + return p->height;} + +AVSC_API(const BYTE *, avs_get_read_ptr_p)(const AVS_VideoFrame * p, int plane); + +#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR +AVSC_INLINE const BYTE* avs_get_read_ptr(const AVS_VideoFrame * p) { + return avs_get_read_ptr_p(p, 0);} +#endif + +AVSC_API(int, avs_is_writable)(const AVS_VideoFrame * p); + +AVSC_API(BYTE *, avs_get_write_ptr_p)(const AVS_VideoFrame * p, int plane); + +#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR +AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p) { + return avs_get_write_ptr_p(p, 0);} +#endif + +AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *); +// makes a shallow copy of a video frame +AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *); + +#ifndef AVSC_NO_DECLSPEC +AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f) + {avs_release_video_frame(f);} +AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f) + {return avs_copy_video_frame(f);} +#endif + +///////////////////////////////////////////////////////////////////// +// +// AVS_Value +// + +// Treat AVS_Value as a fat pointer. That is use avs_copy_value +// and avs_release_value appropiaty as you would if AVS_Value was +// a pointer. + +// To maintain source code compatibility with future versions of the +// avisynth_c API don't use the AVS_Value directly. Use the helper +// functions below. + +// AVS_Value is layed out identicly to AVSValue +typedef struct AVS_Value AVS_Value; +struct AVS_Value { + short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong + // for some function e'rror + short array_size; + union { + void * clip; // do not use directly, use avs_take_clip + char boolean; + int integer; + float floating_pt; + const char * string; + const AVS_Value * array; + } d; +}; + +// AVS_Value should be initilized with avs_void. +// Should also set to avs_void after the value is released +// with avs_copy_value. 
Consider it the equalvent of setting +// a pointer to NULL +static const AVS_Value avs_void = {'v'}; + +AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src); +AVSC_API(void, avs_release_value)(AVS_Value); + +AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; } +AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; } +AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; } +AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; } +AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; } +AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; } +AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; } +AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; } + +AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *); +AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *); + +AVSC_INLINE int avs_as_bool(AVS_Value v) + { return v.d.boolean; } +AVSC_INLINE int avs_as_int(AVS_Value v) + { return v.d.integer; } +AVSC_INLINE const char * avs_as_string(AVS_Value v) + { return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; } +AVSC_INLINE double avs_as_float(AVS_Value v) + { return avs_is_int(v) ? v.d.integer : v.d.floating_pt; } +AVSC_INLINE const char * avs_as_error(AVS_Value v) + { return avs_is_error(v) ? v.d.string : 0; } +AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v) + { return v.d.array; } +AVSC_INLINE int avs_array_size(AVS_Value v) + { return avs_is_array(v) ? v.array_size : 1; } +AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index) + { return avs_is_array(v) ? v.d.array[index] : v; } + +// only use these functions on an AVS_Value that does not already have +// an active value. Remember, treat AVS_Value as a fat pointer. +AVSC_INLINE AVS_Value avs_new_value_bool(int v0) + { AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 
0 : 1; return v; } +AVSC_INLINE AVS_Value avs_new_value_int(int v0) + { AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; } +AVSC_INLINE AVS_Value avs_new_value_string(const char * v0) + { AVS_Value v; v.type = 's'; v.d.string = v0; return v; } +AVSC_INLINE AVS_Value avs_new_value_float(float v0) + { AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;} +AVSC_INLINE AVS_Value avs_new_value_error(const char * v0) + { AVS_Value v; v.type = 'e'; v.d.string = v0; return v; } +#ifndef AVSC_NO_DECLSPEC +AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0) + { AVS_Value v; avs_set_to_clip(&v, v0); return v; } +#endif +AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size) + { AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = (short)size; return v; } + +///////////////////////////////////////////////////////////////////// +// +// AVS_Clip +// + +AVSC_API(void, avs_release_clip)(AVS_Clip *); +AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *); + +AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error + +AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *); + +AVSC_API(int, avs_get_version)(AVS_Clip *); + +AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n); +// The returned video frame must be released with avs_release_video_frame + +AVSC_API(int, avs_get_parity)(AVS_Clip *, int n); +// return field parity if field_based, else parity of first field in frame + +AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf, + INT64 start, INT64 count); +// start and count are in samples + +AVSC_API(int, avs_set_cache_hints)(AVS_Clip *, + int cachehints, int frame_range); + +// This is the callback type used by avs_add_function +typedef AVS_Value (AVSC_CC * AVS_ApplyFunc) + (AVS_ScriptEnvironment *, AVS_Value args, void * user_data); + +typedef struct AVS_FilterInfo AVS_FilterInfo; +struct AVS_FilterInfo +{ + // these members should not be modified outside of the AVS_ApplyFunc callback + AVS_Clip * child; + AVS_VideoInfo vi; + AVS_ScriptEnvironment * env; + AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n); + int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n); + int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf, + INT64 start, INT64 count); + int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints, + int frame_range); + void (AVSC_CC * free_filter)(AVS_FilterInfo *); + + // Should be set when ever there is an error to report. + // It is cleared before any of the above methods are called + const char * error; + // this is to store whatever and may be modified at will + void * user_data; +}; + +// Create a new filter +// fi is set to point to the AVS_FilterInfo so that you can +// modify it once it is initilized. +// store_child should generally be set to true. If it is not +// set than ALL methods (the function pointers) must be defined +// If it is set than you do not need to worry about freeing the child +// clip. +AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e, + AVS_FilterInfo * * fi, + AVS_Value child, int store_child); + +///////////////////////////////////////////////////////////////////// +// +// AVS_ScriptEnvironment +// + +// For GetCPUFlags. These are backwards-compatible with those in VirtualDub. 
+enum { + /* slowest CPU to support extension */ + AVS_CPU_FORCE = 0x01, // N/A + AVS_CPU_FPU = 0x02, // 386/486DX + AVS_CPU_MMX = 0x04, // P55C, K6, PII + AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon + AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP + AVS_CPU_SSE2 = 0x20, // PIV, Hammer + AVS_CPU_3DNOW = 0x40, // K6-2 + AVS_CPU_3DNOW_EXT = 0x80, // Athlon + AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2, + // which only Hammer will have anyway) + AVS_CPUF_SSE3 = 0x100, // PIV+, K8 Venice + AVS_CPUF_SSSE3 = 0x200, // Core 2 + AVS_CPUF_SSE4 = 0x400, // Penryn, Wolfdale, Yorkfield + AVS_CPUF_SSE4_1 = 0x400, +//AVS_CPUF_AVX = 0x800, // Sandy Bridge, Bulldozer + AVS_CPUF_SSE4_2 = 0x1000, // Nehalem +//AVS_CPUF_AVX2 = 0x2000, // Haswell +//AVS_CPUF_AVX512 = 0x4000, // Knights Landing +}; + + +AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error + +AVSC_API(int, avs_get_cpu_flags)(AVS_ScriptEnvironment *); +AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version); + +AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length); +AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...); + +AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, void* val); + // note: val is really a va_list; I hope everyone typedefs va_list to a pointer + +AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *, + const char * name, const char * params, + AVS_ApplyFunc apply, void * user_data); + +AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name); + +AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name, + AVS_Value args, const char** arg_names); +// The returned value must be be released with avs_release_value + +AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name); +// The returned value must be be released with avs_release_value + +AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val); + +AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val); + +//void avs_push_context(AVS_ScriptEnvironment *, int level=0); +//void avs_pop_context(AVS_ScriptEnvironment *); + +AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *, + const AVS_VideoInfo * vi, int align); +// align should be at least 16 + +#ifndef AVSC_NO_DECLSPEC +AVSC_INLINE +AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env, + const AVS_VideoInfo * vi) + {return avs_new_video_frame_a(env,vi,FRAME_ALIGN);} + +AVSC_INLINE +AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env, + const AVS_VideoInfo * vi) + {return avs_new_video_frame_a(env,vi,FRAME_ALIGN);} +#endif + + +AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf); + +AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, BYTE* dstp, int dst_pitch, const BYTE* srcp, int src_pitch, int row_size, int height); + +typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env); +AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data); + +AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height); +// The returned video frame must be be released + +AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem); + +AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir); + +// avisynth.dll 
exports this; it's a way to use it as a library, without +// writing an AVS script or without going through AVIFile. +AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version); + +// this symbol is the entry point for the plugin and must +// be defined +AVSC_EXPORT +const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env); + + +AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *); + + +AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV); +// The returned video frame must be be released + +#ifdef AVSC_NO_DECLSPEC +// use LoadLibrary and related functions to dynamically load Avisynth instead of declspec(dllimport) +/* + The following functions needs to have been declared, probably from windows.h + + void* malloc(size_t) + void free(void*); + + HMODULE LoadLibrary(const char*); + void* GetProcAddress(HMODULE, const char*); + FreeLibrary(HMODULE); +*/ + + +typedef struct AVS_Library AVS_Library; + +#define AVSC_DECLARE_FUNC(name) name##_func name + +struct AVS_Library { + HMODULE handle; + + AVSC_DECLARE_FUNC(avs_add_function); + AVSC_DECLARE_FUNC(avs_at_exit); + AVSC_DECLARE_FUNC(avs_bit_blt); + AVSC_DECLARE_FUNC(avs_check_version); + AVSC_DECLARE_FUNC(avs_clip_get_error); + AVSC_DECLARE_FUNC(avs_copy_clip); + AVSC_DECLARE_FUNC(avs_copy_value); + AVSC_DECLARE_FUNC(avs_copy_video_frame); + AVSC_DECLARE_FUNC(avs_create_script_environment); + AVSC_DECLARE_FUNC(avs_delete_script_environment); + AVSC_DECLARE_FUNC(avs_function_exists); + AVSC_DECLARE_FUNC(avs_get_audio); + AVSC_DECLARE_FUNC(avs_get_cpu_flags); + AVSC_DECLARE_FUNC(avs_get_frame); + AVSC_DECLARE_FUNC(avs_get_parity); + AVSC_DECLARE_FUNC(avs_get_var); + AVSC_DECLARE_FUNC(avs_get_version); + AVSC_DECLARE_FUNC(avs_get_video_info); + AVSC_DECLARE_FUNC(avs_invoke); + AVSC_DECLARE_FUNC(avs_make_writable); + AVSC_DECLARE_FUNC(avs_new_c_filter); + AVSC_DECLARE_FUNC(avs_new_video_frame_a); + AVSC_DECLARE_FUNC(avs_release_clip); + AVSC_DECLARE_FUNC(avs_release_value); + AVSC_DECLARE_FUNC(avs_release_video_frame); + AVSC_DECLARE_FUNC(avs_save_string); + AVSC_DECLARE_FUNC(avs_set_cache_hints); + AVSC_DECLARE_FUNC(avs_set_global_var); + AVSC_DECLARE_FUNC(avs_set_memory_max); + AVSC_DECLARE_FUNC(avs_set_to_clip); + AVSC_DECLARE_FUNC(avs_set_var); + AVSC_DECLARE_FUNC(avs_set_working_dir); + AVSC_DECLARE_FUNC(avs_sprintf); + AVSC_DECLARE_FUNC(avs_subframe); + AVSC_DECLARE_FUNC(avs_subframe_planar); + AVSC_DECLARE_FUNC(avs_take_clip); + AVSC_DECLARE_FUNC(avs_vsprintf); + + AVSC_DECLARE_FUNC(avs_get_error); + AVSC_DECLARE_FUNC(avs_is_rgb48); + AVSC_DECLARE_FUNC(avs_is_rgb64); + AVSC_DECLARE_FUNC(avs_is_yv24); + AVSC_DECLARE_FUNC(avs_is_yv16); + AVSC_DECLARE_FUNC(avs_is_yv12); + AVSC_DECLARE_FUNC(avs_is_yv411); + AVSC_DECLARE_FUNC(avs_is_y8); + AVSC_DECLARE_FUNC(avs_is_yuv444p16); + AVSC_DECLARE_FUNC(avs_is_yuv422p16); + AVSC_DECLARE_FUNC(avs_is_yuv420p16); + AVSC_DECLARE_FUNC(avs_is_y16); + AVSC_DECLARE_FUNC(avs_is_yuv444ps); + AVSC_DECLARE_FUNC(avs_is_yuv422ps); + AVSC_DECLARE_FUNC(avs_is_yuv420ps); + AVSC_DECLARE_FUNC(avs_is_y32); + AVSC_DECLARE_FUNC(avs_is_444); + AVSC_DECLARE_FUNC(avs_is_422); + AVSC_DECLARE_FUNC(avs_is_420); + AVSC_DECLARE_FUNC(avs_is_y); + AVSC_DECLARE_FUNC(avs_is_yuva); + AVSC_DECLARE_FUNC(avs_is_planar_rgb); + AVSC_DECLARE_FUNC(avs_is_planar_rgba); + AVSC_DECLARE_FUNC(avs_is_color_space); + + 
AVSC_DECLARE_FUNC(avs_get_plane_width_subsampling); + AVSC_DECLARE_FUNC(avs_get_plane_height_subsampling); + AVSC_DECLARE_FUNC(avs_bits_per_pixel); + AVSC_DECLARE_FUNC(avs_bytes_from_pixels); + AVSC_DECLARE_FUNC(avs_row_size); + AVSC_DECLARE_FUNC(avs_bmp_size); + AVSC_DECLARE_FUNC(avs_get_pitch_p); + AVSC_DECLARE_FUNC(avs_get_row_size_p); + AVSC_DECLARE_FUNC(avs_get_height_p); + AVSC_DECLARE_FUNC(avs_get_read_ptr_p); + AVSC_DECLARE_FUNC(avs_is_writable); + AVSC_DECLARE_FUNC(avs_get_write_ptr_p); + + AVSC_DECLARE_FUNC(avs_num_components); + AVSC_DECLARE_FUNC(avs_component_size); + AVSC_DECLARE_FUNC(avs_bits_per_component); + +}; + +#undef AVSC_DECLARE_FUNC + + +AVSC_INLINE AVS_Library * avs_load_library() { + AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library)); + if (library == NULL) + return NULL; + library->handle = LoadLibrary("avisynth"); + if (library->handle == NULL) + goto fail; + +#define __AVSC_STRINGIFY(x) #x +#define AVSC_STRINGIFY(x) __AVSC_STRINGIFY(x) +#define AVSC_LOAD_FUNC(name) {\ + library->name = (name##_func) GetProcAddress(library->handle, AVSC_STRINGIFY(name));\ + if (library->name == NULL)\ + goto fail;\ +} + + AVSC_LOAD_FUNC(avs_add_function); + AVSC_LOAD_FUNC(avs_at_exit); + AVSC_LOAD_FUNC(avs_bit_blt); + AVSC_LOAD_FUNC(avs_check_version); + AVSC_LOAD_FUNC(avs_clip_get_error); + AVSC_LOAD_FUNC(avs_copy_clip); + AVSC_LOAD_FUNC(avs_copy_value); + AVSC_LOAD_FUNC(avs_copy_video_frame); + AVSC_LOAD_FUNC(avs_create_script_environment); + AVSC_LOAD_FUNC(avs_delete_script_environment); + AVSC_LOAD_FUNC(avs_function_exists); + AVSC_LOAD_FUNC(avs_get_audio); + AVSC_LOAD_FUNC(avs_get_cpu_flags); + AVSC_LOAD_FUNC(avs_get_frame); + AVSC_LOAD_FUNC(avs_get_parity); + AVSC_LOAD_FUNC(avs_get_var); + AVSC_LOAD_FUNC(avs_get_version); + AVSC_LOAD_FUNC(avs_get_video_info); + AVSC_LOAD_FUNC(avs_invoke); + AVSC_LOAD_FUNC(avs_make_writable); + AVSC_LOAD_FUNC(avs_new_c_filter); + AVSC_LOAD_FUNC(avs_new_video_frame_a); + AVSC_LOAD_FUNC(avs_release_clip); + AVSC_LOAD_FUNC(avs_release_value); + AVSC_LOAD_FUNC(avs_release_video_frame); + AVSC_LOAD_FUNC(avs_save_string); + AVSC_LOAD_FUNC(avs_set_cache_hints); + AVSC_LOAD_FUNC(avs_set_global_var); + AVSC_LOAD_FUNC(avs_set_memory_max); + AVSC_LOAD_FUNC(avs_set_to_clip); + AVSC_LOAD_FUNC(avs_set_var); + AVSC_LOAD_FUNC(avs_set_working_dir); + AVSC_LOAD_FUNC(avs_sprintf); + AVSC_LOAD_FUNC(avs_subframe); + AVSC_LOAD_FUNC(avs_subframe_planar); + AVSC_LOAD_FUNC(avs_take_clip); + AVSC_LOAD_FUNC(avs_vsprintf); + + AVSC_LOAD_FUNC(avs_get_error); + AVSC_LOAD_FUNC(avs_is_rgb48); + AVSC_LOAD_FUNC(avs_is_rgb64); + AVSC_LOAD_FUNC(avs_is_yv24); + AVSC_LOAD_FUNC(avs_is_yv16); + AVSC_LOAD_FUNC(avs_is_yv12); + AVSC_LOAD_FUNC(avs_is_yv411); + AVSC_LOAD_FUNC(avs_is_y8); + AVSC_LOAD_FUNC(avs_is_yuv444p16); + AVSC_LOAD_FUNC(avs_is_yuv422p16); + AVSC_LOAD_FUNC(avs_is_yuv420p16); + AVSC_LOAD_FUNC(avs_is_y16); + AVSC_LOAD_FUNC(avs_is_yuv444ps); + AVSC_LOAD_FUNC(avs_is_yuv422ps); + AVSC_LOAD_FUNC(avs_is_yuv420ps); + AVSC_LOAD_FUNC(avs_is_y32); + AVSC_LOAD_FUNC(avs_is_444); + AVSC_LOAD_FUNC(avs_is_422); + AVSC_LOAD_FUNC(avs_is_420); + AVSC_LOAD_FUNC(avs_is_y); + AVSC_LOAD_FUNC(avs_is_yuva); + AVSC_LOAD_FUNC(avs_is_planar_rgb); + AVSC_LOAD_FUNC(avs_is_planar_rgba); + AVSC_LOAD_FUNC(avs_is_color_space); + + AVSC_LOAD_FUNC(avs_get_plane_width_subsampling); + AVSC_LOAD_FUNC(avs_get_plane_height_subsampling); + AVSC_LOAD_FUNC(avs_bits_per_pixel); + AVSC_LOAD_FUNC(avs_bytes_from_pixels); + AVSC_LOAD_FUNC(avs_row_size); + AVSC_LOAD_FUNC(avs_bmp_size); + 
AVSC_LOAD_FUNC(avs_get_pitch_p); + AVSC_LOAD_FUNC(avs_get_row_size_p); + AVSC_LOAD_FUNC(avs_get_height_p); + AVSC_LOAD_FUNC(avs_get_read_ptr_p); + AVSC_LOAD_FUNC(avs_is_writable); + AVSC_LOAD_FUNC(avs_get_write_ptr_p); + + AVSC_LOAD_FUNC(avs_num_components); + AVSC_LOAD_FUNC(avs_component_size); + AVSC_LOAD_FUNC(avs_bits_per_component); + + + +#undef __AVSC_STRINGIFY +#undef AVSC_STRINGIFY +#undef AVSC_LOAD_FUNC + + return library; + +fail: + free(library); + return NULL; +} + +AVSC_INLINE void avs_free_library(AVS_Library *library) { + if (library == NULL) + return; + FreeLibrary(library->handle); + free(library); +} +#endif + +#endif diff --git a/compat/avisynth/avs/capi.h b/compat/avisynth/avs/capi.h new file mode 100644 index 0000000..6ed6770 --- /dev/null +++ b/compat/avisynth/avs/capi.h @@ -0,0 +1,62 @@ +// Avisynth C Interface Version 0.20 +// Copyright 2003 Kevin Atkinson + +// This program is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 2 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit +// http://www.gnu.org/copyleft/gpl.html . +// +// As a special exception, I give you permission to link to the +// Avisynth C interface with independent modules that communicate with +// the Avisynth C interface solely through the interfaces defined in +// avisynth_c.h, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting combined work +// under terms of your choice, provided that every copy of the +// combined work is accompanied by a complete copy of the source code +// of the Avisynth C interface and Avisynth itself (with the version +// used to produce the combined work), being distributed under the +// terms of the GNU General Public License plus this exception. An +// independent module is a module which is not derived from or based +// on Avisynth C Interface, such as 3rd-party filters, import and +// export plugins, or graphical user interfaces. 
+ +#ifndef AVS_CAPI_H +#define AVS_CAPI_H + +#ifdef __cplusplus +# define EXTERN_C extern "C" +#else +# define EXTERN_C +#endif + +#ifndef AVSC_USE_STDCALL +# define AVSC_CC __cdecl +#else +# define AVSC_CC __stdcall +#endif + +#define AVSC_INLINE static __inline + +#ifdef BUILDING_AVSCORE +# define AVSC_EXPORT EXTERN_C +# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name +#else +# define AVSC_EXPORT EXTERN_C __declspec(dllexport) +# ifndef AVSC_NO_DECLSPEC +# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name +# else +# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func) +# endif +#endif + +#endif //AVS_CAPI_H diff --git a/compat/avisynth/avs/config.h b/compat/avisynth/avs/config.h new file mode 100644 index 0000000..7acd95b --- /dev/null +++ b/compat/avisynth/avs/config.h @@ -0,0 +1,55 @@ +// Avisynth C Interface Version 0.20 +// Copyright 2003 Kevin Atkinson + +// This program is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 2 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit +// http://www.gnu.org/copyleft/gpl.html . +// +// As a special exception, I give you permission to link to the +// Avisynth C interface with independent modules that communicate with +// the Avisynth C interface solely through the interfaces defined in +// avisynth_c.h, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting combined work +// under terms of your choice, provided that every copy of the +// combined work is accompanied by a complete copy of the source code +// of the Avisynth C interface and Avisynth itself (with the version +// used to produce the combined work), being distributed under the +// terms of the GNU General Public License plus this exception. An +// independent module is a module which is not derived from or based +// on Avisynth C Interface, such as 3rd-party filters, import and +// export plugins, or graphical user interfaces. + +#ifndef AVS_CONFIG_H +#define AVS_CONFIG_H + +// Undefine this to get cdecl calling convention +#define AVSC_USE_STDCALL 1 + +// NOTE TO PLUGIN AUTHORS: +// Because FRAME_ALIGN can be substantially higher than the alignment +// a plugin actually needs, plugins should not use FRAME_ALIGN to check for +// alignment. They should always request the exact alignment value they need. +// This is to make sure that plugins work over the widest range of AviSynth +// builds possible. +#define FRAME_ALIGN 32 + +#if defined(_M_AMD64) || defined(__x86_64) +# define X86_64 +#elif defined(_M_IX86) || defined(__i386__) +# define X86_32 +#else +# error Unsupported CPU architecture. 
+#endif + +#endif //AVS_CONFIG_H diff --git a/compat/avisynth/avs/types.h b/compat/avisynth/avs/types.h new file mode 100644 index 0000000..e5f084c --- /dev/null +++ b/compat/avisynth/avs/types.h @@ -0,0 +1,51 @@ +// Avisynth C Interface Version 0.20 +// Copyright 2003 Kevin Atkinson + +// This program is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 2 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit +// http://www.gnu.org/copyleft/gpl.html . +// +// As a special exception, I give you permission to link to the +// Avisynth C interface with independent modules that communicate with +// the Avisynth C interface solely through the interfaces defined in +// avisynth_c.h, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting combined work +// under terms of your choice, provided that every copy of the +// combined work is accompanied by a complete copy of the source code +// of the Avisynth C interface and Avisynth itself (with the version +// used to produce the combined work), being distributed under the +// terms of the GNU General Public License plus this exception. An +// independent module is a module which is not derived from or based +// on Avisynth C Interface, such as 3rd-party filters, import and +// export plugins, or graphical user interfaces. + +#ifndef AVS_TYPES_H +#define AVS_TYPES_H + +// Define all types necessary for interfacing with avisynth.dll + +// Raster types used by VirtualDub & Avisynth +typedef unsigned int Pixel32; +typedef unsigned char BYTE; + +// Audio Sample information +typedef float SFLOAT; + +#ifdef __GNUC__ +typedef long long int INT64; +#else +typedef __int64 INT64; +#endif + +#endif //AVS_TYPES_H diff --git a/compat/avisynth/avxsynth_c.h b/compat/avisynth/avxsynth_c.h new file mode 100644 index 0000000..991f4be --- /dev/null +++ b/compat/avisynth/avxsynth_c.h @@ -0,0 +1,728 @@ +// Avisynth C Interface Version 0.20 +// Copyright 2003 Kevin Atkinson + +// This program is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 2 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +// MA 02110-1301 USA, or visit +// http://www.gnu.org/copyleft/gpl.html . 
+// +// As a special exception, I give you permission to link to the +// Avisynth C interface with independent modules that communicate with +// the Avisynth C interface solely through the interfaces defined in +// avisynth_c.h, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting combined work +// under terms of your choice, provided that every copy of the +// combined work is accompanied by a complete copy of the source code +// of the Avisynth C interface and Avisynth itself (with the version +// used to produce the combined work), being distributed under the +// terms of the GNU General Public License plus this exception. An +// independent module is a module which is not derived from or based +// on Avisynth C Interface, such as 3rd-party filters, import and +// export plugins, or graphical user interfaces. + +#ifndef __AVXSYNTH_C__ +#define __AVXSYNTH_C__ + +#include "windowsPorts/windows2linux.h" +#include <stdarg.h> + +#ifdef __cplusplus +# define EXTERN_C extern "C" +#else +# define EXTERN_C +#endif + +#define AVSC_USE_STDCALL 1 + +#ifndef AVSC_USE_STDCALL +# define AVSC_CC __cdecl +#else +# define AVSC_CC __stdcall +#endif + +#define AVSC_INLINE static __inline + +#ifdef AVISYNTH_C_EXPORTS +# define AVSC_EXPORT EXTERN_C +# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name +#else +# define AVSC_EXPORT EXTERN_C __declspec(dllexport) +# ifndef AVSC_NO_DECLSPEC +# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name +# else +# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func) +# endif +#endif + +#ifdef __GNUC__ +typedef long long int INT64; +#else +typedef __int64 INT64; +#endif + + +///////////////////////////////////////////////////////////////////// +// +// Constants +// + +#ifndef __AVXSYNTH_H__ +enum { AVISYNTH_INTERFACE_VERSION = 3 }; +#endif + +enum {AVS_SAMPLE_INT8 = 1<<0, + AVS_SAMPLE_INT16 = 1<<1, + AVS_SAMPLE_INT24 = 1<<2, + AVS_SAMPLE_INT32 = 1<<3, + AVS_SAMPLE_FLOAT = 1<<4}; + +enum {AVS_PLANAR_Y=1<<0, + AVS_PLANAR_U=1<<1, + AVS_PLANAR_V=1<<2, + AVS_PLANAR_ALIGNED=1<<3, + AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED, + AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED, + AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED}; + + // Colorspace properties. 
+enum {AVS_CS_BGR = 1<<28, + AVS_CS_YUV = 1<<29, + AVS_CS_INTERLEAVED = 1<<30, + AVS_CS_PLANAR = 1<<31}; + + // Specific colorformats +enum { + AVS_CS_UNKNOWN = 0, + AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED, + AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED, + AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED, + AVS_CS_YV12 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar + AVS_CS_I420 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar + AVS_CS_IYUV = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR // same as above +}; + +enum { + AVS_IT_BFF = 1<<0, + AVS_IT_TFF = 1<<1, + AVS_IT_FIELDBASED = 1<<2}; + +enum { + AVS_FILTER_TYPE=1, + AVS_FILTER_INPUT_COLORSPACE=2, + AVS_FILTER_OUTPUT_TYPE=9, + AVS_FILTER_NAME=4, + AVS_FILTER_AUTHOR=5, + AVS_FILTER_VERSION=6, + AVS_FILTER_ARGS=7, + AVS_FILTER_ARGS_INFO=8, + AVS_FILTER_ARGS_DESCRIPTION=10, + AVS_FILTER_DESCRIPTION=11}; + +enum { //SUBTYPES + AVS_FILTER_TYPE_AUDIO=1, + AVS_FILTER_TYPE_VIDEO=2, + AVS_FILTER_OUTPUT_TYPE_SAME=3, + AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4}; + +enum { + AVS_CACHE_NOTHING=0, + AVS_CACHE_RANGE=1, + AVS_CACHE_ALL=2, + AVS_CACHE_AUDIO=3, + AVS_CACHE_AUDIO_NONE=4, + AVS_CACHE_AUDIO_AUTO=5 +}; + +#define AVS_FRAME_ALIGN 16 + +typedef struct AVS_Clip AVS_Clip; +typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment; + +///////////////////////////////////////////////////////////////////// +// +// AVS_VideoInfo +// + +// AVS_VideoInfo is layed out identicly to VideoInfo +typedef struct AVS_VideoInfo { + int width, height; // width=0 means no video + unsigned fps_numerator, fps_denominator; + int num_frames; + + int pixel_type; + + int audio_samples_per_second; // 0 means no audio + int sample_type; + INT64 num_audio_samples; + int nchannels; + + // Imagetype properties + + int image_type; +} AVS_VideoInfo; + +// useful functions of the above +AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p) + { return (p->width!=0); } + +AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p) + { return (p->audio_samples_per_second!=0); } + +AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p) + { return !!(p->pixel_type&AVS_CS_BGR); } + +AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p) + { return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties + +AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p) + { return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; } + +AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p) + { return !!(p->pixel_type&AVS_CS_YUV ); } + +AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p) + { return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; } + +AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p) + { return ((p->pixel_type & AVS_CS_YV12) == AVS_CS_YV12)||((p->pixel_type & AVS_CS_I420) == AVS_CS_I420); } + +AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space) + { return ((p->pixel_type & c_space) == c_space); } + +AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property) + { return ((p->pixel_type & property)==property ); } + +AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p) + { return !!(p->pixel_type & AVS_CS_PLANAR); } + +AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p) + { return !!(p->image_type & AVS_IT_FIELDBASED); } + +AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p) + { return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); } + +AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p) + { return !!(p->image_type & AVS_IT_BFF); } + +AVSC_INLINE int 
avs_is_tff(const AVS_VideoInfo * p) + { return !!(p->image_type & AVS_IT_TFF); } + +AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p) +{ + switch (p->pixel_type) { + case AVS_CS_BGR24: return 24; + case AVS_CS_BGR32: return 32; + case AVS_CS_YUY2: return 16; + case AVS_CS_YV12: + case AVS_CS_I420: return 12; + default: return 0; + } +} +AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels) + { return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes + +AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p) + { return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images + +AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi) + { if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); } + +AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p) + { return p->audio_samples_per_second; } + + +AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p) +{ + switch (p->sample_type) { + case AVS_SAMPLE_INT8: return sizeof(signed char); + case AVS_SAMPLE_INT16: return sizeof(signed short); + case AVS_SAMPLE_INT24: return 3; + case AVS_SAMPLE_INT32: return sizeof(signed int); + case AVS_SAMPLE_FLOAT: return sizeof(float); + default: return 0; + } +} +AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p) + { return p->nchannels*avs_bytes_per_channel_sample(p);} + +AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames) + { return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); } + +AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples) + { return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); } + +AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes) + { return bytes / avs_bytes_per_audio_sample(p); } + +AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples) + { return samples * avs_bytes_per_audio_sample(p); } + +AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p) + { return p->nchannels; } + +AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p) + { return p->sample_type;} + +// useful mutator +AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property) + { p->image_type|=property; } + +AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property) + { p->image_type&=~property; } + +AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased) + { if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; } + +AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator) +{ + unsigned x=numerator, y=denominator; + while (y) { // find gcd + unsigned t = x%y; x = y; y = t; + } + p->fps_numerator = numerator/x; + p->fps_denominator = denominator/x; +} + +AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y) +{ + return (x->pixel_type == y->pixel_type) + || (avs_is_yv12(x) && avs_is_yv12(y)); +} + +///////////////////////////////////////////////////////////////////// +// +// AVS_VideoFrame +// + +// VideoFrameBuffer holds information about a memory block which is used +// for video data. 
For efficiency, instances of this class are not deleted +// when the refcount reaches zero; instead they're stored in a linked list +// to be reused. The instances are deleted when the corresponding AVS +// file is closed. + +// AVS_VideoFrameBuffer is layed out identicly to VideoFrameBuffer +// DO NOT USE THIS STRUCTURE DIRECTLY +typedef struct AVS_VideoFrameBuffer { + unsigned char * data; + int data_size; + // sequence_number is incremented every time the buffer is changed, so + // that stale views can tell they're no longer valid. + long sequence_number; + + long refcount; +} AVS_VideoFrameBuffer; + +// VideoFrame holds a "window" into a VideoFrameBuffer. + +// AVS_VideoFrame is layed out identicly to IVideoFrame +// DO NOT USE THIS STRUCTURE DIRECTLY +typedef struct AVS_VideoFrame { + int refcount; + AVS_VideoFrameBuffer * vfb; + int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture. +} AVS_VideoFrame; + +// Access functions for AVS_VideoFrame +AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) { + return p->pitch;} + +AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) { + switch (plane) { + case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;} + return p->pitch;} + +AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) { + return p->row_size; } + +AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) { + int r; + switch (plane) { + case AVS_PLANAR_U: case AVS_PLANAR_V: + if (p->pitchUV) return p->row_size>>1; + else return 0; + case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED: + if (p->pitchUV) { + r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize + if (r < p->pitchUV) + return r; + return p->row_size>>1; + } else return 0; + case AVS_PLANAR_Y_ALIGNED: + r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize + if (r <= p->pitch) + return r; + return p->row_size; + } + return p->row_size; +} + +AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) { + return p->height;} + +AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) { + switch (plane) { + case AVS_PLANAR_U: case AVS_PLANAR_V: + if (p->pitchUV) return p->height>>1; + return 0; + } + return p->height;} + +AVSC_INLINE const unsigned char* avs_get_read_ptr(const AVS_VideoFrame * p) { + return p->vfb->data + p->offset;} + +AVSC_INLINE const unsigned char* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane) +{ + switch (plane) { + case AVS_PLANAR_U: return p->vfb->data + p->offsetU; + case AVS_PLANAR_V: return p->vfb->data + p->offsetV; + default: return p->vfb->data + p->offset;} +} + +AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) { + return (p->refcount == 1 && p->vfb->refcount == 1);} + +AVSC_INLINE unsigned char* avs_get_write_ptr(const AVS_VideoFrame * p) +{ + if (avs_is_writable(p)) { + ++p->vfb->sequence_number; + return p->vfb->data + p->offset; + } else + return 0; +} + +AVSC_INLINE unsigned char* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane) +{ + if (plane==AVS_PLANAR_Y && avs_is_writable(p)) { + ++p->vfb->sequence_number; + return p->vfb->data + p->offset; + } else if (plane==AVS_PLANAR_Y) { + return 0; + } else { + switch (plane) { + case AVS_PLANAR_U: return p->vfb->data + p->offsetU; + case AVS_PLANAR_V: return p->vfb->data + p->offsetV; + default: return p->vfb->data + p->offset; + } + } +} + +#if defined __cplusplus +extern "C" +{ +#endif // __cplusplus +AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame 
*); +// makes a shallow copy of a video frame +AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *); +#if defined __cplusplus +} +#endif // __cplusplus + +#ifndef AVSC_NO_DECLSPEC +AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f) + {avs_release_video_frame(f);} +AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f) + {return avs_copy_video_frame(f);} +#endif + +///////////////////////////////////////////////////////////////////// +// +// AVS_Value +// + +// Treat AVS_Value as a fat pointer. That is use avs_copy_value +// and avs_release_value appropiaty as you would if AVS_Value was +// a pointer. + +// To maintain source code compatibility with future versions of the +// avisynth_c API don't use the AVS_Value directly. Use the helper +// functions below. + +// AVS_Value is layed out identicly to AVSValue +typedef struct AVS_Value AVS_Value; +struct AVS_Value { + short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong + // for some function e'rror + short array_size; + union { + void * clip; // do not use directly, use avs_take_clip + char boolean; + int integer; + INT64 integer64; // match addition of __int64 to avxplugin.h + float floating_pt; + const char * string; + const AVS_Value * array; + } d; +}; + +// AVS_Value should be initilized with avs_void. +// Should also set to avs_void after the value is released +// with avs_copy_value. Consider it the equalvent of setting +// a pointer to NULL +static const AVS_Value avs_void = {'v'}; + +AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src); +AVSC_API(void, avs_release_value)(AVS_Value); + +AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; } +AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; } +AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; } +AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; } +AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; } +AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; } +AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; } +AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; } + +#if defined __cplusplus +extern "C" +{ +#endif // __cplusplus +AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *); +AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *); +#if defined __cplusplus +} +#endif // __cplusplus + +AVSC_INLINE int avs_as_bool(AVS_Value v) + { return v.d.boolean; } +AVSC_INLINE int avs_as_int(AVS_Value v) + { return v.d.integer; } +AVSC_INLINE const char * avs_as_string(AVS_Value v) + { return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; } +AVSC_INLINE double avs_as_float(AVS_Value v) + { return avs_is_int(v) ? v.d.integer : v.d.floating_pt; } +AVSC_INLINE const char * avs_as_error(AVS_Value v) + { return avs_is_error(v) ? v.d.string : 0; } +AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v) + { return v.d.array; } +AVSC_INLINE int avs_array_size(AVS_Value v) + { return avs_is_array(v) ? v.array_size : 1; } +AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index) + { return avs_is_array(v) ? v.d.array[index] : v; } + +// only use these functions on am AVS_Value that does not already have +// an active value. Remember, treat AVS_Value as a fat pointer. +AVSC_INLINE AVS_Value avs_new_value_bool(int v0) + { AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 
0 : 1; return v; } +AVSC_INLINE AVS_Value avs_new_value_int(int v0) + { AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; } +AVSC_INLINE AVS_Value avs_new_value_string(const char * v0) + { AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; } +AVSC_INLINE AVS_Value avs_new_value_float(float v0) + { AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;} +AVSC_INLINE AVS_Value avs_new_value_error(const char * v0) + { AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; } +#ifndef AVSC_NO_DECLSPEC +AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0) + { AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; } +#endif +AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size) + { AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; } + +///////////////////////////////////////////////////////////////////// +// +// AVS_Clip +// +#if defined __cplusplus +extern "C" +{ +#endif // __cplusplus +AVSC_API(void, avs_release_clip)(AVS_Clip *); +AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *); + +AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error + +AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *); + +AVSC_API(int, avs_get_version)(AVS_Clip *); + +AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n); +// The returned video frame must be released with avs_release_video_frame + +AVSC_API(int, avs_get_parity)(AVS_Clip *, int n); +// return field parity if field_based, else parity of first field in frame + +AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf, + INT64 start, INT64 count); +// start and count are in samples + +AVSC_API(int, avs_set_cache_hints)(AVS_Clip *, + int cachehints, size_t frame_range); +#if defined __cplusplus +} +#endif // __cplusplus + +// This is the callback type used by avs_add_function +typedef AVS_Value (AVSC_CC * AVS_ApplyFunc) + (AVS_ScriptEnvironment *, AVS_Value args, void * user_data); + +typedef struct AVS_FilterInfo AVS_FilterInfo; +struct AVS_FilterInfo +{ + // these members should not be modified outside of the AVS_ApplyFunc callback + AVS_Clip * child; + AVS_VideoInfo vi; + AVS_ScriptEnvironment * env; + AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n); + int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n); + int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf, + INT64 start, INT64 count); + int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints, + int frame_range); + void (AVSC_CC * free_filter)(AVS_FilterInfo *); + + // Should be set when ever there is an error to report. + // It is cleared before any of the above methods are called + const char * error; + // this is to store whatever and may be modified at will + void * user_data; +}; + +// Create a new filter +// fi is set to point to the AVS_FilterInfo so that you can +// modify it once it is initilized. +// store_child should generally be set to true. If it is not +// set than ALL methods (the function pointers) must be defined +// If it is set than you do not need to worry about freeing the child +// clip. +#if defined __cplusplus +extern "C" +{ +#endif // __cplusplus +AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e, + AVS_FilterInfo * * fi, + AVS_Value child, int store_child); +#if defined __cplusplus +} +#endif // __cplusplus + + +///////////////////////////////////////////////////////////////////// +// +// AVS_ScriptEnvironment +// + +// For GetCPUFlags. These are backwards-compatible with those in VirtualDub. 
+enum { + /* slowest CPU to support extension */ + AVS_CPU_FORCE = 0x01, // N/A + AVS_CPU_FPU = 0x02, // 386/486DX + AVS_CPU_MMX = 0x04, // P55C, K6, PII + AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon + AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP + AVS_CPU_SSE2 = 0x20, // PIV, Hammer + AVS_CPU_3DNOW = 0x40, // K6-2 + AVS_CPU_3DNOW_EXT = 0x80, // Athlon + AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2, + // which only Hammer will have anyway) +}; + +#if defined __cplusplus +extern "C" +{ +#endif // __cplusplus +AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error + +AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *); +AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version); + +AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length); +AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...); + +AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, va_list val); + // note: val is really a va_list; I hope everyone typedefs va_list to a pointer + +AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *, + const char * name, const char * params, + AVS_ApplyFunc apply, void * user_data); + +AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name); + +AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name, + AVS_Value args, const char** arg_names); +// The returned value must be be released with avs_release_value + +AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name); +// The returned value must be be released with avs_release_value + +AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val); + +AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val); + +//void avs_push_context(AVS_ScriptEnvironment *, int level=0); +//void avs_pop_context(AVS_ScriptEnvironment *); + +AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *, + const AVS_VideoInfo * vi, int align); +// align should be at least 16 +#if defined __cplusplus +} +#endif // __cplusplus + +#ifndef AVSC_NO_DECLSPEC +AVSC_INLINE +AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env, + const AVS_VideoInfo * vi) + {return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);} + +AVSC_INLINE +AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env, + const AVS_VideoInfo * vi) + {return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);} +#endif + +#if defined __cplusplus +extern "C" +{ +#endif // __cplusplus +AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf); + +AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, unsigned char* dstp, int dst_pitch, const unsigned char* srcp, int src_pitch, int row_size, int height); + +typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env); +AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data); + +AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height); +// The returned video frame must be be released + +AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem); + +AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir); + +// avisynth.dll exports this; it's a way to use it as a library, without +// writing an AVS script or without going through AVIFile. 
+AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version); +#if defined __cplusplus +} +#endif // __cplusplus + +// this symbol is the entry point for the plugin and must +// be defined +AVSC_EXPORT +const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env); + + +#if defined __cplusplus +extern "C" +{ +#endif // __cplusplus +AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *); + + +AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV); +// The returned video frame must be be released +#if defined __cplusplus +} +#endif // __cplusplus + +#endif //__AVXSYNTH_C__ diff --git a/compat/avisynth/windowsPorts/basicDataTypeConversions.h b/compat/avisynth/windowsPorts/basicDataTypeConversions.h new file mode 100644 index 0000000..ff367d5 --- /dev/null +++ b/compat/avisynth/windowsPorts/basicDataTypeConversions.h @@ -0,0 +1,85 @@ +#ifndef __DATA_TYPE_CONVERSIONS_H__ +#define __DATA_TYPE_CONVERSIONS_H__ + +#include <stdint.h> +#include <wchar.h> + +#ifdef __cplusplus +namespace avxsynth { +#endif // __cplusplus + +typedef int64_t __int64; +typedef int32_t __int32; +#ifdef __cplusplus +typedef bool BOOL; +#else +typedef uint32_t BOOL; +#endif // __cplusplus +typedef void* HMODULE; +typedef void* LPVOID; +typedef void* PVOID; +typedef PVOID HANDLE; +typedef HANDLE HWND; +typedef HANDLE HINSTANCE; +typedef void* HDC; +typedef void* HBITMAP; +typedef void* HICON; +typedef void* HFONT; +typedef void* HGDIOBJ; +typedef void* HBRUSH; +typedef void* HMMIO; +typedef void* HACMSTREAM; +typedef void* HACMDRIVER; +typedef void* HIC; +typedef void* HACMOBJ; +typedef HACMSTREAM* LPHACMSTREAM; +typedef void* HACMDRIVERID; +typedef void* LPHACMDRIVER; +typedef unsigned char BYTE; +typedef BYTE* LPBYTE; +typedef char TCHAR; +typedef TCHAR* LPTSTR; +typedef const TCHAR* LPCTSTR; +typedef char* LPSTR; +typedef LPSTR LPOLESTR; +typedef const char* LPCSTR; +typedef LPCSTR LPCOLESTR; +typedef wchar_t WCHAR; +typedef unsigned short WORD; +typedef unsigned int UINT; +typedef UINT MMRESULT; +typedef uint32_t DWORD; +typedef DWORD COLORREF; +typedef DWORD FOURCC; +typedef DWORD HRESULT; +typedef DWORD* LPDWORD; +typedef DWORD* DWORD_PTR; +typedef int32_t LONG; +typedef int32_t* LONG_PTR; +typedef LONG_PTR LRESULT; +typedef uint32_t ULONG; +typedef uint32_t* ULONG_PTR; +//typedef __int64_t intptr_t; +typedef uint64_t _fsize_t; + + +// +// Structures +// + +typedef struct _GUID { + DWORD Data1; + WORD Data2; + WORD Data3; + BYTE Data4[8]; +} GUID; + +typedef GUID REFIID; +typedef GUID CLSID; +typedef CLSID* LPCLSID; +typedef GUID IID; + +#ifdef __cplusplus +}; // namespace avxsynth +#endif // __cplusplus +#endif // __DATA_TYPE_CONVERSIONS_H__ diff --git a/compat/avisynth/windowsPorts/windows2linux.h b/compat/avisynth/windowsPorts/windows2linux.h new file mode 100644 index 0000000..7cf4600 --- /dev/null +++ b/compat/avisynth/windowsPorts/windows2linux.h @@ -0,0 +1,77 @@ +#ifndef __WINDOWS2LINUX_H__ +#define __WINDOWS2LINUX_H__ + +/* + * LINUX SPECIFIC DEFINITIONS +*/ +// +// Data types conversions +// +#include <stdlib.h> +#include <string.h> +#include "basicDataTypeConversions.h" + +#ifdef __cplusplus +namespace avxsynth { +#endif // __cplusplus +// +// purposefully define the following MSFT definitions +// to mean nothing (as they do not mean anything on Linux) +// +#define __stdcall +#define __cdecl +#define noreturn 
+#define __declspec(x) +#define STDAPI extern "C" HRESULT +#define STDMETHODIMP HRESULT __stdcall +#define STDMETHODIMP_(x) x __stdcall + +#define STDMETHOD(x) virtual HRESULT x +#define STDMETHOD_(a, x) virtual a x + +#ifndef TRUE +#define TRUE true +#endif + +#ifndef FALSE +#define FALSE false +#endif + +#define S_OK (0x00000000) +#define S_FALSE (0x00000001) +#define E_NOINTERFACE (0X80004002) +#define E_POINTER (0x80004003) +#define E_FAIL (0x80004005) +#define E_OUTOFMEMORY (0x8007000E) + +#define INVALID_HANDLE_VALUE ((HANDLE)((LONG_PTR)-1)) +#define FAILED(hr) ((hr) & 0x80000000) +#define SUCCEEDED(hr) (!FAILED(hr)) + + +// +// Functions +// +#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d)) +#define MAKEWORD(a,b) (((a) << 8) | (b)) + +#define lstrlen strlen +#define lstrcpy strcpy +#define lstrcmpi strcasecmp +#define _stricmp strcasecmp +#define InterlockedIncrement(x) __sync_fetch_and_add((x), 1) +#define InterlockedDecrement(x) __sync_fetch_and_sub((x), 1) +// Windows uses (new, old) ordering but GCC has (old, new) +#define InterlockedCompareExchange(x,y,z) __sync_val_compare_and_swap(x,z,y) + +#define UInt32x32To64(a, b) ( (uint64_t) ( ((uint64_t)((uint32_t)(a))) * ((uint32_t)(b)) ) ) +#define Int64ShrlMod32(a, b) ( (uint64_t) ( (uint64_t)(a) >> (b) ) ) +#define Int32x32To64(a, b) ((__int64)(((__int64)((long)(a))) * ((long)(b)))) + +#define MulDiv(nNumber, nNumerator, nDenominator) (int32_t) (((int64_t) (nNumber) * (int64_t) (nNumerator) + (int64_t) ((nDenominator)/2)) / (int64_t) (nDenominator)) + +#ifdef __cplusplus +}; // namespace avxsynth +#endif // __cplusplus + +#endif // __WINDOWS2LINUX_H__ diff --git a/compat/cuda/dynlink_cuda.h b/compat/cuda/dynlink_cuda.h new file mode 100644 index 0000000..418bbd0 --- /dev/null +++ b/compat/cuda/dynlink_cuda.h @@ -0,0 +1,97 @@ +/* + * This copyright notice applies to this header file only: + * + * Copyright (c) 2016 + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the software, and to permit persons to whom the + * software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(AV_COMPAT_DYNLINK_CUDA_H) && !defined(CUDA_VERSION) +#define AV_COMPAT_DYNLINK_CUDA_H + +#include <stddef.h> + +#define CUDA_VERSION 7050 + +#if defined(_WIN32) || defined(__CYGWIN__) +#define CUDAAPI __stdcall +#else +#define CUDAAPI +#endif + +#define CU_CTX_SCHED_BLOCKING_SYNC 4 + +typedef int CUdevice; +typedef void* CUarray; +typedef void* CUcontext; +#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +typedef unsigned long long CUdeviceptr; +#else +typedef unsigned int CUdeviceptr; +#endif + +typedef enum cudaError_enum { + CUDA_SUCCESS = 0 +} CUresult; + +typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 1, + CU_MEMORYTYPE_DEVICE = 2 +} CUmemorytype; + +typedef struct CUDA_MEMCPY2D_st { + size_t srcXInBytes; + size_t srcY; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + size_t srcPitch; + + size_t dstXInBytes; + size_t dstY; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + size_t dstPitch; + + size_t WidthInBytes; + size_t Height; +} CUDA_MEMCPY2D; + +typedef CUresult CUDAAPI tcuInit(unsigned int Flags); +typedef CUresult CUDAAPI tcuDeviceGetCount(int *count); +typedef CUresult CUDAAPI tcuDeviceGet(CUdevice *device, int ordinal); +typedef CUresult CUDAAPI tcuDeviceGetName(char *name, int len, CUdevice dev); +typedef CUresult CUDAAPI tcuDeviceComputeCapability(int *major, int *minor, CUdevice dev); +typedef CUresult CUDAAPI tcuCtxCreate_v2(CUcontext *pctx, unsigned int flags, CUdevice dev); +typedef CUresult CUDAAPI tcuCtxPushCurrent_v2(CUcontext *pctx); +typedef CUresult CUDAAPI tcuCtxPopCurrent_v2(CUcontext *pctx); +typedef CUresult CUDAAPI tcuCtxDestroy_v2(CUcontext ctx); +typedef CUresult CUDAAPI tcuMemAlloc_v2(CUdeviceptr *dptr, size_t bytesize); +typedef CUresult CUDAAPI tcuMemFree_v2(CUdeviceptr dptr); +typedef CUresult CUDAAPI tcuMemcpy2D_v2(const CUDA_MEMCPY2D *pcopy); +typedef CUresult CUDAAPI tcuGetErrorName(CUresult error, const char** pstr); +typedef CUresult CUDAAPI tcuGetErrorString(CUresult error, const char** pstr); + +#endif diff --git a/compat/cuda/dynlink_cuviddec.h b/compat/cuda/dynlink_cuviddec.h new file mode 100644 index 0000000..4d23764 --- /dev/null +++ b/compat/cuda/dynlink_cuviddec.h @@ -0,0 +1,815 @@ +/* + * This copyright notice applies to this header file only: + * + * Copyright (c) 2010-2016 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the software, and to permit persons to whom the + * software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * \file cuviddec.h + * NvCuvid API provides Video Decoding interface to NVIDIA GPU devices. + * \date 2015-2016 + * This file contains constants, structure definitions and function prototypes used for decoding. + */ + +#if !defined(__CUDA_VIDEO_H__) +#define __CUDA_VIDEO_H__ + +#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +#if (CUDA_VERSION >= 3020) && (!defined(CUDA_FORCE_API_VERSION) || (CUDA_FORCE_API_VERSION >= 3020)) +#define __CUVID_DEVPTR64 +#endif +#endif + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +#if defined(__CYGWIN__) +typedef unsigned int tcu_ulong; +#else +typedef unsigned long tcu_ulong; +#endif + +typedef void *CUvideodecoder; +typedef struct _CUcontextlock_st *CUvideoctxlock; + +/** + * \addtogroup VIDEO_DECODER Video Decoder + * @{ + */ + +/*! + * \enum cudaVideoCodec + * Video Codec Enums + */ +typedef enum cudaVideoCodec_enum { + cudaVideoCodec_MPEG1=0, /**< MPEG1 */ + cudaVideoCodec_MPEG2, /**< MPEG2 */ + cudaVideoCodec_MPEG4, /**< MPEG4 */ + cudaVideoCodec_VC1, /**< VC1 */ + cudaVideoCodec_H264, /**< H264 */ + cudaVideoCodec_JPEG, /**< JPEG */ + cudaVideoCodec_H264_SVC, /**< H264-SVC */ + cudaVideoCodec_H264_MVC, /**< H264-MVC */ + cudaVideoCodec_HEVC, /**< HEVC */ + cudaVideoCodec_VP8, /**< VP8 */ + cudaVideoCodec_VP9, /**< VP9 */ + cudaVideoCodec_NumCodecs, /**< Max COdecs */ + // Uncompressed YUV + cudaVideoCodec_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), /**< Y,U,V (4:2:0) */ + cudaVideoCodec_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,V,U (4:2:0) */ + cudaVideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,UV (4:2:0) */ + cudaVideoCodec_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), /**< YUYV/YUY2 (4:2:2) */ + cudaVideoCodec_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) /**< UYVY (4:2:2) */ +} cudaVideoCodec; + +/*! + * \enum cudaVideoSurfaceFormat + * Video Surface Formats Enums + */ +typedef enum cudaVideoSurfaceFormat_enum { + cudaVideoSurfaceFormat_NV12=0, /**< NV12 */ + cudaVideoSurfaceFormat_P016=1 /**< P016 */ +} cudaVideoSurfaceFormat; + +/*! + * \enum cudaVideoDeinterlaceMode + * Deinterlacing Modes Enums + */ +typedef enum cudaVideoDeinterlaceMode_enum { + cudaVideoDeinterlaceMode_Weave=0, /**< Weave both fields (no deinterlacing) */ + cudaVideoDeinterlaceMode_Bob, /**< Drop one field */ + cudaVideoDeinterlaceMode_Adaptive /**< Adaptive deinterlacing */ +} cudaVideoDeinterlaceMode; + +/*! + * \enum cudaVideoChromaFormat + * Chroma Formats Enums + */ +typedef enum cudaVideoChromaFormat_enum { + cudaVideoChromaFormat_Monochrome=0, /**< MonoChrome */ + cudaVideoChromaFormat_420, /**< 4:2:0 */ + cudaVideoChromaFormat_422, /**< 4:2:2 */ + cudaVideoChromaFormat_444 /**< 4:4:4 */ +} cudaVideoChromaFormat; + +/*! + * \enum cudaVideoCreateFlags + * Decoder Flags Enums + */ +typedef enum cudaVideoCreateFlags_enum { + cudaVideoCreate_Default = 0x00, /**< Default operation mode: use dedicated video engines */ + cudaVideoCreate_PreferCUDA = 0x01, /**< Use a CUDA-based decoder if faster than dedicated engines (requires a valid vidLock object for multi-threading) */ + cudaVideoCreate_PreferDXVA = 0x02, /**< Go through DXVA internally if possible (requires D3D9 interop) */ + cudaVideoCreate_PreferCUVID = 0x04 /**< Use dedicated video engines directly */ +} cudaVideoCreateFlags; + +/*! 
+ * \struct CUVIDDECODECREATEINFO + * Struct used in create decoder + */ +typedef struct _CUVIDDECODECREATEINFO +{ + tcu_ulong ulWidth; /**< Coded Sequence Width */ + tcu_ulong ulHeight; /**< Coded Sequence Height */ + tcu_ulong ulNumDecodeSurfaces; /**< Maximum number of internal decode surfaces */ + cudaVideoCodec CodecType; /**< cudaVideoCodec_XXX */ + cudaVideoChromaFormat ChromaFormat; /**< cudaVideoChromaFormat_XXX (only 4:2:0 is currently supported) */ + tcu_ulong ulCreationFlags; /**< Decoder creation flags (cudaVideoCreateFlags_XXX) */ + tcu_ulong bitDepthMinus8; + tcu_ulong Reserved1[4]; /**< Reserved for future use - set to zero */ + /** + * area of the frame that should be displayed + */ + struct { + short left; + short top; + short right; + short bottom; + } display_area; + + cudaVideoSurfaceFormat OutputFormat; /**< cudaVideoSurfaceFormat_XXX */ + cudaVideoDeinterlaceMode DeinterlaceMode; /**< cudaVideoDeinterlaceMode_XXX */ + tcu_ulong ulTargetWidth; /**< Post-processed Output Width (Should be aligned to 2) */ + tcu_ulong ulTargetHeight; /**< Post-processed Output Height (Should be aligbed to 2) */ + tcu_ulong ulNumOutputSurfaces; /**< Maximum number of output surfaces simultaneously mapped */ + CUvideoctxlock vidLock; /**< If non-NULL, context lock used for synchronizing ownership of the cuda context */ + /** + * target rectangle in the output frame (for aspect ratio conversion) + * if a null rectangle is specified, {0,0,ulTargetWidth,ulTargetHeight} will be used + */ + struct { + short left; + short top; + short right; + short bottom; + } target_rect; + tcu_ulong Reserved2[5]; /**< Reserved for future use - set to zero */ +} CUVIDDECODECREATEINFO; + +/*! + * \struct CUVIDH264DPBENTRY + * H.264 DPB Entry + */ +typedef struct _CUVIDH264DPBENTRY +{ + int PicIdx; /**< picture index of reference frame */ + int FrameIdx; /**< frame_num(short-term) or LongTermFrameIdx(long-term) */ + int is_long_term; /**< 0=short term reference, 1=long term reference */ + int not_existing; /**< non-existing reference frame (corresponding PicIdx should be set to -1) */ + int used_for_reference; /**< 0=unused, 1=top_field, 2=bottom_field, 3=both_fields */ + int FieldOrderCnt[2]; /**< field order count of top and bottom fields */ +} CUVIDH264DPBENTRY; + +/*! + * \struct CUVIDH264MVCEXT + * H.264 MVC Picture Parameters Ext + */ +typedef struct _CUVIDH264MVCEXT +{ + int num_views_minus1; + int view_id; + unsigned char inter_view_flag; + unsigned char num_inter_view_refs_l0; + unsigned char num_inter_view_refs_l1; + unsigned char MVCReserved8Bits; + int InterViewRefsL0[16]; + int InterViewRefsL1[16]; +} CUVIDH264MVCEXT; + +/*! 
+ * \struct CUVIDH264SVCEXT + * H.264 SVC Picture Parameters Ext + */ +typedef struct _CUVIDH264SVCEXT +{ + unsigned char profile_idc; + unsigned char level_idc; + unsigned char DQId; + unsigned char DQIdMax; + unsigned char disable_inter_layer_deblocking_filter_idc; + unsigned char ref_layer_chroma_phase_y_plus1; + signed char inter_layer_slice_alpha_c0_offset_div2; + signed char inter_layer_slice_beta_offset_div2; + + unsigned short DPBEntryValidFlag; + unsigned char inter_layer_deblocking_filter_control_present_flag; + unsigned char extended_spatial_scalability_idc; + unsigned char adaptive_tcoeff_level_prediction_flag; + unsigned char slice_header_restriction_flag; + unsigned char chroma_phase_x_plus1_flag; + unsigned char chroma_phase_y_plus1; + + unsigned char tcoeff_level_prediction_flag; + unsigned char constrained_intra_resampling_flag; + unsigned char ref_layer_chroma_phase_x_plus1_flag; + unsigned char store_ref_base_pic_flag; + unsigned char Reserved8BitsA; + unsigned char Reserved8BitsB; + // For the 4 scaled_ref_layer_XX fields below, + // if (extended_spatial_scalability_idc == 1), SPS field, G.7.3.2.1.4, add prefix "seq_" + // if (extended_spatial_scalability_idc == 2), SLH field, G.7.3.3.4, + short scaled_ref_layer_left_offset; + short scaled_ref_layer_top_offset; + short scaled_ref_layer_right_offset; + short scaled_ref_layer_bottom_offset; + unsigned short Reserved16Bits; + struct _CUVIDPICPARAMS *pNextLayer; /**< Points to the picparams for the next layer to be decoded. Linked list ends at the target layer. */ + int bRefBaseLayer; /**< whether to store ref base pic */ +} CUVIDH264SVCEXT; + +/*! + * \struct CUVIDH264PICPARAMS + * H.264 Picture Parameters + */ +typedef struct _CUVIDH264PICPARAMS +{ + // SPS + int log2_max_frame_num_minus4; + int pic_order_cnt_type; + int log2_max_pic_order_cnt_lsb_minus4; + int delta_pic_order_always_zero_flag; + int frame_mbs_only_flag; + int direct_8x8_inference_flag; + int num_ref_frames; // NOTE: shall meet level 4.1 restrictions + unsigned char residual_colour_transform_flag; + unsigned char bit_depth_luma_minus8; // Must be 0 (only 8-bit supported) + unsigned char bit_depth_chroma_minus8; // Must be 0 (only 8-bit supported) + unsigned char qpprime_y_zero_transform_bypass_flag; + // PPS + int entropy_coding_mode_flag; + int pic_order_present_flag; + int num_ref_idx_l0_active_minus1; + int num_ref_idx_l1_active_minus1; + int weighted_pred_flag; + int weighted_bipred_idc; + int pic_init_qp_minus26; + int deblocking_filter_control_present_flag; + int redundant_pic_cnt_present_flag; + int transform_8x8_mode_flag; + int MbaffFrameFlag; + int constrained_intra_pred_flag; + int chroma_qp_index_offset; + int second_chroma_qp_index_offset; + int ref_pic_flag; + int frame_num; + int CurrFieldOrderCnt[2]; + // DPB + CUVIDH264DPBENTRY dpb[16]; // List of reference frames within the DPB + // Quantization Matrices (raster-order) + unsigned char WeightScale4x4[6][16]; + unsigned char WeightScale8x8[2][64]; + // FMO/ASO + unsigned char fmo_aso_enable; + unsigned char num_slice_groups_minus1; + unsigned char slice_group_map_type; + signed char pic_init_qs_minus26; + unsigned int slice_group_change_rate_minus1; + union + { + unsigned long long slice_group_map_addr; + const unsigned char *pMb2SliceGroupMap; + } fmo; + unsigned int Reserved[12]; + // SVC/MVC + union + { + CUVIDH264MVCEXT mvcext; + CUVIDH264SVCEXT svcext; + } svcmvc; +} CUVIDH264PICPARAMS; + + +/*! 
+ * \struct CUVIDMPEG2PICPARAMS + * MPEG-2 Picture Parameters + */ +typedef struct _CUVIDMPEG2PICPARAMS +{ + int ForwardRefIdx; // Picture index of forward reference (P/B-frames) + int BackwardRefIdx; // Picture index of backward reference (B-frames) + int picture_coding_type; + int full_pel_forward_vector; + int full_pel_backward_vector; + int f_code[2][2]; + int intra_dc_precision; + int frame_pred_frame_dct; + int concealment_motion_vectors; + int q_scale_type; + int intra_vlc_format; + int alternate_scan; + int top_field_first; + // Quantization matrices (raster order) + unsigned char QuantMatrixIntra[64]; + unsigned char QuantMatrixInter[64]; +} CUVIDMPEG2PICPARAMS; + +//////////////////////////////////////////////////////////////////////////////////////////////// +// +// MPEG-4 Picture Parameters +// + +// MPEG-4 has VOP types instead of Picture types +#define I_VOP 0 +#define P_VOP 1 +#define B_VOP 2 +#define S_VOP 3 + +/*! + * \struct CUVIDMPEG4PICPARAMS + * MPEG-4 Picture Parameters + */ +typedef struct _CUVIDMPEG4PICPARAMS +{ + int ForwardRefIdx; // Picture index of forward reference (P/B-frames) + int BackwardRefIdx; // Picture index of backward reference (B-frames) + // VOL + int video_object_layer_width; + int video_object_layer_height; + int vop_time_increment_bitcount; + int top_field_first; + int resync_marker_disable; + int quant_type; + int quarter_sample; + int short_video_header; + int divx_flags; + // VOP + int vop_coding_type; + int vop_coded; + int vop_rounding_type; + int alternate_vertical_scan_flag; + int interlaced; + int vop_fcode_forward; + int vop_fcode_backward; + int trd[2]; + int trb[2]; + // Quantization matrices (raster order) + unsigned char QuantMatrixIntra[64]; + unsigned char QuantMatrixInter[64]; + int gmc_enabled; +} CUVIDMPEG4PICPARAMS; + +/*! + * \struct CUVIDVC1PICPARAMS + * VC1 Picture Parameters + */ +typedef struct _CUVIDVC1PICPARAMS +{ + int ForwardRefIdx; /**< Picture index of forward reference (P/B-frames) */ + int BackwardRefIdx; /**< Picture index of backward reference (B-frames) */ + int FrameWidth; /**< Actual frame width */ + int FrameHeight; /**< Actual frame height */ + // PICTURE + int intra_pic_flag; /**< Set to 1 for I,BI frames */ + int ref_pic_flag; /**< Set to 1 for I,P frames */ + int progressive_fcm; /**< Progressive frame */ + // SEQUENCE + int profile; + int postprocflag; + int pulldown; + int interlace; + int tfcntrflag; + int finterpflag; + int psf; + int multires; + int syncmarker; + int rangered; + int maxbframes; + // ENTRYPOINT + int panscan_flag; + int refdist_flag; + int extended_mv; + int dquant; + int vstransform; + int loopfilter; + int fastuvmc; + int overlap; + int quantizer; + int extended_dmv; + int range_mapy_flag; + int range_mapy; + int range_mapuv_flag; + int range_mapuv; + int rangeredfrm; // range reduction state +} CUVIDVC1PICPARAMS; + +/*! + * \struct CUVIDJPEGPICPARAMS + * JPEG Picture Parameters + */ +typedef struct _CUVIDJPEGPICPARAMS +{ + int Reserved; +} CUVIDJPEGPICPARAMS; + + + /*! 
+ * \struct CUVIDHEVCPICPARAMS + * HEVC Picture Parameters + */ +typedef struct _CUVIDHEVCPICPARAMS +{ + // sps + int pic_width_in_luma_samples; + int pic_height_in_luma_samples; + unsigned char log2_min_luma_coding_block_size_minus3; + unsigned char log2_diff_max_min_luma_coding_block_size; + unsigned char log2_min_transform_block_size_minus2; + unsigned char log2_diff_max_min_transform_block_size; + unsigned char pcm_enabled_flag; + unsigned char log2_min_pcm_luma_coding_block_size_minus3; + unsigned char log2_diff_max_min_pcm_luma_coding_block_size; + unsigned char pcm_sample_bit_depth_luma_minus1; + + unsigned char pcm_sample_bit_depth_chroma_minus1; + unsigned char pcm_loop_filter_disabled_flag; + unsigned char strong_intra_smoothing_enabled_flag; + unsigned char max_transform_hierarchy_depth_intra; + unsigned char max_transform_hierarchy_depth_inter; + unsigned char amp_enabled_flag; + unsigned char separate_colour_plane_flag; + unsigned char log2_max_pic_order_cnt_lsb_minus4; + + unsigned char num_short_term_ref_pic_sets; + unsigned char long_term_ref_pics_present_flag; + unsigned char num_long_term_ref_pics_sps; + unsigned char sps_temporal_mvp_enabled_flag; + unsigned char sample_adaptive_offset_enabled_flag; + unsigned char scaling_list_enable_flag; + unsigned char IrapPicFlag; + unsigned char IdrPicFlag; + + unsigned char bit_depth_luma_minus8; + unsigned char bit_depth_chroma_minus8; + unsigned char reserved1[14]; + + // pps + unsigned char dependent_slice_segments_enabled_flag; + unsigned char slice_segment_header_extension_present_flag; + unsigned char sign_data_hiding_enabled_flag; + unsigned char cu_qp_delta_enabled_flag; + unsigned char diff_cu_qp_delta_depth; + signed char init_qp_minus26; + signed char pps_cb_qp_offset; + signed char pps_cr_qp_offset; + + unsigned char constrained_intra_pred_flag; + unsigned char weighted_pred_flag; + unsigned char weighted_bipred_flag; + unsigned char transform_skip_enabled_flag; + unsigned char transquant_bypass_enabled_flag; + unsigned char entropy_coding_sync_enabled_flag; + unsigned char log2_parallel_merge_level_minus2; + unsigned char num_extra_slice_header_bits; + + unsigned char loop_filter_across_tiles_enabled_flag; + unsigned char loop_filter_across_slices_enabled_flag; + unsigned char output_flag_present_flag; + unsigned char num_ref_idx_l0_default_active_minus1; + unsigned char num_ref_idx_l1_default_active_minus1; + unsigned char lists_modification_present_flag; + unsigned char cabac_init_present_flag; + unsigned char pps_slice_chroma_qp_offsets_present_flag; + + unsigned char deblocking_filter_override_enabled_flag; + unsigned char pps_deblocking_filter_disabled_flag; + signed char pps_beta_offset_div2; + signed char pps_tc_offset_div2; + unsigned char tiles_enabled_flag; + unsigned char uniform_spacing_flag; + unsigned char num_tile_columns_minus1; + unsigned char num_tile_rows_minus1; + + unsigned short column_width_minus1[21]; + unsigned short row_height_minus1[21]; + unsigned int reserved3[15]; + + // RefPicSets + int NumBitsForShortTermRPSInSlice; + int NumDeltaPocsOfRefRpsIdx; + int NumPocTotalCurr; + int NumPocStCurrBefore; + int NumPocStCurrAfter; + int NumPocLtCurr; + int CurrPicOrderCntVal; + int RefPicIdx[16]; // [refpic] Indices of valid reference pictures (-1 if unused for reference) + int PicOrderCntVal[16]; // [refpic] + unsigned char IsLongTerm[16]; // [refpic] 0=not a long-term reference, 1=long-term reference + unsigned char RefPicSetStCurrBefore[8]; // [0..NumPocStCurrBefore-1] -> refpic (0..15) + 
unsigned char RefPicSetStCurrAfter[8]; // [0..NumPocStCurrAfter-1] -> refpic (0..15) + unsigned char RefPicSetLtCurr[8]; // [0..NumPocLtCurr-1] -> refpic (0..15) + unsigned char RefPicSetInterLayer0[8]; + unsigned char RefPicSetInterLayer1[8]; + unsigned int reserved4[12]; + + // scaling lists (diag order) + unsigned char ScalingList4x4[6][16]; // [matrixId][i] + unsigned char ScalingList8x8[6][64]; // [matrixId][i] + unsigned char ScalingList16x16[6][64]; // [matrixId][i] + unsigned char ScalingList32x32[2][64]; // [matrixId][i] + unsigned char ScalingListDCCoeff16x16[6]; // [matrixId] + unsigned char ScalingListDCCoeff32x32[2]; // [matrixId] +} CUVIDHEVCPICPARAMS; + + +/*! + * \struct CUVIDVP8PICPARAMS + * VP8 Picture Parameters + */ +typedef struct _CUVIDVP8PICPARAMS +{ + int width; + int height; + unsigned int first_partition_size; + //Frame Indexes + unsigned char LastRefIdx; + unsigned char GoldenRefIdx; + unsigned char AltRefIdx; + union { + struct { + unsigned char frame_type : 1; /**< 0 = KEYFRAME, 1 = INTERFRAME */ + unsigned char version : 3; + unsigned char show_frame : 1; + unsigned char update_mb_segmentation_data : 1; /**< Must be 0 if segmentation is not enabled */ + unsigned char Reserved2Bits : 2; + }; + unsigned char wFrameTagFlags; + } tagflags; + unsigned char Reserved1[4]; + unsigned int Reserved2[3]; +} CUVIDVP8PICPARAMS; + +/*! + * \struct CUVIDVP9PICPARAMS + * VP9 Picture Parameters + */ +typedef struct _CUVIDVP9PICPARAMS +{ + unsigned int width; + unsigned int height; + + //Frame Indices + unsigned char LastRefIdx; + unsigned char GoldenRefIdx; + unsigned char AltRefIdx; + unsigned char colorSpace; + + unsigned short profile : 3; + unsigned short frameContextIdx : 2; + unsigned short frameType : 1; + unsigned short showFrame : 1; + unsigned short errorResilient : 1; + unsigned short frameParallelDecoding : 1; + unsigned short subSamplingX : 1; + unsigned short subSamplingY : 1; + unsigned short intraOnly : 1; + unsigned short allow_high_precision_mv : 1; + unsigned short refreshEntropyProbs : 1; + unsigned short reserved2Bits : 2; + + unsigned short reserved16Bits; + + unsigned char refFrameSignBias[4]; + + unsigned char bitDepthMinus8Luma; + unsigned char bitDepthMinus8Chroma; + unsigned char loopFilterLevel; + unsigned char loopFilterSharpness; + + unsigned char modeRefLfEnabled; + unsigned char log2_tile_columns; + unsigned char log2_tile_rows; + + unsigned char segmentEnabled : 1; + unsigned char segmentMapUpdate : 1; + unsigned char segmentMapTemporalUpdate : 1; + unsigned char segmentFeatureMode : 1; + unsigned char reserved4Bits : 4; + + + unsigned char segmentFeatureEnable[8][4]; + short segmentFeatureData[8][4]; + unsigned char mb_segment_tree_probs[7]; + unsigned char segment_pred_probs[3]; + unsigned char reservedSegment16Bits[2]; + + int qpYAc; + int qpYDc; + int qpChDc; + int qpChAc; + + unsigned int activeRefIdx[3]; + unsigned int resetFrameContext; + unsigned int mcomp_filter_type; + unsigned int mbRefLfDelta[4]; + unsigned int mbModeLfDelta[2]; + unsigned int frameTagSize; + unsigned int offsetToDctParts; + unsigned int reserved128Bits[4]; + +} CUVIDVP9PICPARAMS; + + +/*! 
+ * \struct CUVIDPICPARAMS + * Picture Parameters for Decoding + */ +typedef struct _CUVIDPICPARAMS +{ + int PicWidthInMbs; /**< Coded Frame Size */ + int FrameHeightInMbs; /**< Coded Frame Height */ + int CurrPicIdx; /**< Output index of the current picture */ + int field_pic_flag; /**< 0=frame picture, 1=field picture */ + int bottom_field_flag; /**< 0=top field, 1=bottom field (ignored if field_pic_flag=0) */ + int second_field; /**< Second field of a complementary field pair */ + // Bitstream data + unsigned int nBitstreamDataLen; /**< Number of bytes in bitstream data buffer */ + const unsigned char *pBitstreamData; /**< Ptr to bitstream data for this picture (slice-layer) */ + unsigned int nNumSlices; /**< Number of slices in this picture */ + const unsigned int *pSliceDataOffsets; /**< nNumSlices entries, contains offset of each slice within the bitstream data buffer */ + int ref_pic_flag; /**< This picture is a reference picture */ + int intra_pic_flag; /**< This picture is entirely intra coded */ + unsigned int Reserved[30]; /**< Reserved for future use */ + // Codec-specific data + union { + CUVIDMPEG2PICPARAMS mpeg2; /**< Also used for MPEG-1 */ + CUVIDH264PICPARAMS h264; + CUVIDVC1PICPARAMS vc1; + CUVIDMPEG4PICPARAMS mpeg4; + CUVIDJPEGPICPARAMS jpeg; + CUVIDHEVCPICPARAMS hevc; + CUVIDVP8PICPARAMS vp8; + CUVIDVP9PICPARAMS vp9; + unsigned int CodecReserved[1024]; + } CodecSpecific; +} CUVIDPICPARAMS; + + +/*! + * \struct CUVIDPROCPARAMS + * Picture Parameters for Postprocessing + */ +typedef struct _CUVIDPROCPARAMS +{ + int progressive_frame; /**< Input is progressive (deinterlace_mode will be ignored) */ + int second_field; /**< Output the second field (ignored if deinterlace mode is Weave) */ + int top_field_first; /**< Input frame is top field first (1st field is top, 2nd field is bottom) */ + int unpaired_field; /**< Input only contains one field (2nd field is invalid) */ + // The fields below are used for raw YUV input + unsigned int reserved_flags; /**< Reserved for future use (set to zero) */ + unsigned int reserved_zero; /**< Reserved (set to zero) */ + unsigned long long raw_input_dptr; /**< Input CUdeviceptr for raw YUV extensions */ + unsigned int raw_input_pitch; /**< pitch in bytes of raw YUV input (should be aligned appropriately) */ + unsigned int raw_input_format; /**< Reserved for future use (set to zero) */ + unsigned long long raw_output_dptr; /**< Reserved for future use (set to zero) */ + unsigned int raw_output_pitch; /**< Reserved for future use (set to zero) */ + unsigned int Reserved[48]; + void *Reserved3[3]; +} CUVIDPROCPARAMS; + + +/** + * + * In order to minimize decode latencies, there should be always at least 2 pictures in the decode + * queue at any time, in order to make sure that all decode engines are always busy. + * + * Overall data flow: + * - cuvidCreateDecoder(...) + * For each picture: + * - cuvidDecodePicture(N) + * - cuvidMapVideoFrame(N-4) + * - do some processing in cuda + * - cuvidUnmapVideoFrame(N-4) + * - cuvidDecodePicture(N+1) + * - cuvidMapVideoFrame(N-3) + * ... + * - cuvidDestroyDecoder(...) + * + * NOTE: + * - When the cuda context is created from a D3D device, the D3D device must also be created + * with the D3DCREATE_MULTITHREADED flag. 
+ * - There is a limit to how many pictures can be mapped simultaneously (ulNumOutputSurfaces) + * - cuVidDecodePicture may block the calling thread if there are too many pictures pending + * in the decode queue + */ + +/** + * \fn CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci) + * Create the decoder object + */ +typedef CUresult CUDAAPI tcuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci); + +/** + * \fn CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder) + * Destroy the decoder object + */ +typedef CUresult CUDAAPI tcuvidDestroyDecoder(CUvideodecoder hDecoder); + +/** + * \fn CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams) + * Decode a single picture (field or frame) + */ +typedef CUresult CUDAAPI tcuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams); + + +#if !defined(__CUVID_DEVPTR64) || defined(__CUVID_INTERNAL) +/** + * \fn CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx, unsigned int *pDevPtr, unsigned int *pPitch, CUVIDPROCPARAMS *pVPP); + * Post-process and map a video frame for use in cuda + */ +typedef CUresult CUDAAPI tcuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx, + unsigned int *pDevPtr, unsigned int *pPitch, + CUVIDPROCPARAMS *pVPP); + +/** + * \fn CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr) + * Unmap a previously mapped video frame + */ +typedef CUresult CUDAAPI tcuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr); +#endif + +#if defined(WIN64) || defined(_WIN64) || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +/** + * \fn CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr, unsigned int *pPitch, CUVIDPROCPARAMS *pVPP); + * map a video frame + */ +typedef CUresult CUDAAPI tcuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr, + unsigned int *pPitch, CUVIDPROCPARAMS *pVPP); + +/** + * \fn CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr); + * Unmap a previously mapped video frame + */ +typedef CUresult CUDAAPI tcuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr); + +#if defined(__CUVID_DEVPTR64) && !defined(__CUVID_INTERNAL) +#define tcuvidMapVideoFrame tcuvidMapVideoFrame64 +#define tcuvidUnmapVideoFrame tcuvidUnmapVideoFrame64 +#endif +#endif + + +/** + * + * Context-locking: to facilitate multi-threaded implementations, the following 4 functions + * provide a simple mutex-style host synchronization. If a non-NULL context is specified + * in CUVIDDECODECREATEINFO, the codec library will acquire the mutex associated with the given + * context before making any cuda calls. + * A multi-threaded application could create a lock associated with a context handle so that + * multiple threads can safely share the same cuda context: + * - use cuCtxPopCurrent immediately after context creation in order to create a 'floating' context + * that can be passed to cuvidCtxLockCreate. + * - When using a floating context, all cuda calls should only be made within a cuvidCtxLock/cuvidCtxUnlock section. + * + * NOTE: This is a safer alternative to cuCtxPushCurrent and cuCtxPopCurrent, and is not related to video + * decoder in any way (implemented as a critical section associated with cuCtx{Push|Pop}Current calls). 
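+ *
+ * As an illustrative sketch only (error checking omitted; 'cuctx' stands for a context the application
+ * created and then popped off the current thread), the locking pattern looks roughly like this:
+ *
+ *     CUvideoctxlock lock;
+ *     cuvidCtxLockCreate(&lock, cuctx);   // associate the lock with the floating context
+ *     ...
+ *     cuvidCtxLock(lock, 0);              // acquire the mutex and make the context current
+ *     // ... cuda/cuvid calls ...
+ *     cuvidCtxUnlock(lock, 0);            // pop the context and release the mutex
+ *     ...
+ *     cuvidCtxLockDestroy(lock);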
+*/ + +/** + * \fn CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx) + */ +typedef CUresult CUDAAPI tcuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx); + +/** + * \fn CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck) + */ +typedef CUresult CUDAAPI tcuvidCtxLockDestroy(CUvideoctxlock lck); + +/** + * \fn CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags) + */ +typedef CUresult CUDAAPI tcuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags); + +/** + * \fn CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags) + */ +typedef CUresult CUDAAPI tcuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags); + +/** @} */ /* End VIDEO_DECODER */ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif // __CUDA_VIDEO_H__ diff --git a/compat/cuda/dynlink_loader.h b/compat/cuda/dynlink_loader.h new file mode 100644 index 0000000..33f23af --- /dev/null +++ b/compat/cuda/dynlink_loader.h @@ -0,0 +1,254 @@ +/* + * This copyright notice applies to this header file only: + * + * Copyright (c) 2016 + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the software, and to permit persons to whom the + * software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef AV_COMPAT_CUDA_DYNLINK_LOADER_H +#define AV_COMPAT_CUDA_DYNLINK_LOADER_H + +#include "compat/cuda/dynlink_cuda.h" +#include "compat/cuda/dynlink_nvcuvid.h" +#include "compat/nvenc/nvEncodeAPI.h" +#include "compat/w32dlfcn.h" + +#include "libavutil/log.h" +#include "libavutil/error.h" + +#if defined(_WIN32) +# define LIB_HANDLE HMODULE +#else +# define LIB_HANDLE void* +#endif + +#if defined(_WIN32) || defined(__CYGWIN__) +# define CUDA_LIBNAME "nvcuda.dll" +# define NVCUVID_LIBNAME "nvcuvid.dll" +# if ARCH_X86_64 +# define NVENC_LIBNAME "nvEncodeAPI64.dll" +# else +# define NVENC_LIBNAME "nvEncodeAPI.dll" +# endif +#else +# define CUDA_LIBNAME "libcuda.so.1" +# define NVCUVID_LIBNAME "libnvcuvid.so.1" +# define NVENC_LIBNAME "libnvidia-encode.so.1" +#endif + +#define LOAD_LIBRARY(l, path) \ + do { \ + if (!((l) = dlopen(path, RTLD_LAZY))) { \ + av_log(NULL, AV_LOG_ERROR, "Cannot load %s\n", path); \ + ret = AVERROR_UNKNOWN; \ + goto error; \ + } \ + av_log(NULL, AV_LOG_TRACE, "Loaded lib: %s\n", path); \ + } while (0) + +#define LOAD_SYMBOL(fun, symbol) \ + do { \ + if (!((f->fun) = dlsym(f->lib, symbol))) { \ + av_log(NULL, AV_LOG_ERROR, "Cannot load %s\n", symbol); \ + ret = AVERROR_UNKNOWN; \ + goto error; \ + } \ + av_log(NULL, AV_LOG_TRACE, "Loaded sym: %s\n", symbol); \ + } while (0) + +#define GENERIC_LOAD_FUNC_PREAMBLE(T, n, N) \ + T *f; \ + int ret; \ + \ + n##_free_functions(functions); \ + \ + f = *functions = av_mallocz(sizeof(*f)); \ + if (!f) \ + return AVERROR(ENOMEM); \ + \ + LOAD_LIBRARY(f->lib, N); + +#define GENERIC_LOAD_FUNC_FINALE(n) \ + return 0; \ +error: \ + n##_free_functions(functions); \ + return ret; + +#define GENERIC_FREE_FUNC() \ + if (!functions) \ + return; \ + if (*functions && (*functions)->lib) \ + dlclose((*functions)->lib); \ + av_freep(functions); + +#ifdef AV_COMPAT_DYNLINK_CUDA_H +typedef struct CudaFunctions { + tcuInit *cuInit; + tcuDeviceGetCount *cuDeviceGetCount; + tcuDeviceGet *cuDeviceGet; + tcuDeviceGetName *cuDeviceGetName; + tcuDeviceComputeCapability *cuDeviceComputeCapability; + tcuCtxCreate_v2 *cuCtxCreate; + tcuCtxPushCurrent_v2 *cuCtxPushCurrent; + tcuCtxPopCurrent_v2 *cuCtxPopCurrent; + tcuCtxDestroy_v2 *cuCtxDestroy; + tcuMemAlloc_v2 *cuMemAlloc; + tcuMemFree_v2 *cuMemFree; + tcuMemcpy2D_v2 *cuMemcpy2D; + tcuGetErrorName *cuGetErrorName; + tcuGetErrorString *cuGetErrorString; + + LIB_HANDLE lib; +} CudaFunctions; +#else +typedef struct CudaFunctions CudaFunctions; +#endif + +typedef struct CuvidFunctions { + tcuvidCreateDecoder *cuvidCreateDecoder; + tcuvidDestroyDecoder *cuvidDestroyDecoder; + tcuvidDecodePicture *cuvidDecodePicture; + tcuvidMapVideoFrame *cuvidMapVideoFrame; + tcuvidUnmapVideoFrame *cuvidUnmapVideoFrame; + tcuvidCtxLockCreate *cuvidCtxLockCreate; + tcuvidCtxLockDestroy *cuvidCtxLockDestroy; + tcuvidCtxLock *cuvidCtxLock; + tcuvidCtxUnlock *cuvidCtxUnlock; + + tcuvidCreateVideoSource *cuvidCreateVideoSource; + tcuvidCreateVideoSourceW *cuvidCreateVideoSourceW; + tcuvidDestroyVideoSource *cuvidDestroyVideoSource; + tcuvidSetVideoSourceState *cuvidSetVideoSourceState; + tcuvidGetVideoSourceState *cuvidGetVideoSourceState; + tcuvidGetSourceVideoFormat *cuvidGetSourceVideoFormat; + tcuvidGetSourceAudioFormat *cuvidGetSourceAudioFormat; + tcuvidCreateVideoParser *cuvidCreateVideoParser; + tcuvidParseVideoData *cuvidParseVideoData; + tcuvidDestroyVideoParser *cuvidDestroyVideoParser; + + LIB_HANDLE lib; +} CuvidFunctions; + +typedef struct NvencFunctions { + NVENCSTATUS (NVENCAPI 
*NvEncodeAPICreateInstance)(NV_ENCODE_API_FUNCTION_LIST *functionList); + NVENCSTATUS (NVENCAPI *NvEncodeAPIGetMaxSupportedVersion)(uint32_t* version); + + LIB_HANDLE lib; +} NvencFunctions; + +#ifdef AV_COMPAT_DYNLINK_CUDA_H +static inline void cuda_free_functions(CudaFunctions **functions) +{ + GENERIC_FREE_FUNC(); +} +#endif + +static inline void cuvid_free_functions(CuvidFunctions **functions) +{ + GENERIC_FREE_FUNC(); +} + +static inline void nvenc_free_functions(NvencFunctions **functions) +{ + GENERIC_FREE_FUNC(); +} + +#ifdef AV_COMPAT_DYNLINK_CUDA_H +static inline int cuda_load_functions(CudaFunctions **functions) +{ + GENERIC_LOAD_FUNC_PREAMBLE(CudaFunctions, cuda, CUDA_LIBNAME); + + LOAD_SYMBOL(cuInit, "cuInit"); + LOAD_SYMBOL(cuDeviceGetCount, "cuDeviceGetCount"); + LOAD_SYMBOL(cuDeviceGet, "cuDeviceGet"); + LOAD_SYMBOL(cuDeviceGetName, "cuDeviceGetName"); + LOAD_SYMBOL(cuDeviceComputeCapability, "cuDeviceComputeCapability"); + LOAD_SYMBOL(cuCtxCreate, "cuCtxCreate_v2"); + LOAD_SYMBOL(cuCtxPushCurrent, "cuCtxPushCurrent_v2"); + LOAD_SYMBOL(cuCtxPopCurrent, "cuCtxPopCurrent_v2"); + LOAD_SYMBOL(cuCtxDestroy, "cuCtxDestroy_v2"); + LOAD_SYMBOL(cuMemAlloc, "cuMemAlloc_v2"); + LOAD_SYMBOL(cuMemFree, "cuMemFree_v2"); + LOAD_SYMBOL(cuMemcpy2D, "cuMemcpy2D_v2"); + LOAD_SYMBOL(cuGetErrorName, "cuGetErrorName"); + LOAD_SYMBOL(cuGetErrorString, "cuGetErrorString"); + + GENERIC_LOAD_FUNC_FINALE(cuda); +} +#endif + +static inline int cuvid_load_functions(CuvidFunctions **functions) +{ + GENERIC_LOAD_FUNC_PREAMBLE(CuvidFunctions, cuvid, NVCUVID_LIBNAME); + + LOAD_SYMBOL(cuvidCreateDecoder, "cuvidCreateDecoder"); + LOAD_SYMBOL(cuvidDestroyDecoder, "cuvidDestroyDecoder"); + LOAD_SYMBOL(cuvidDecodePicture, "cuvidDecodePicture"); +#ifdef __CUVID_DEVPTR64 + LOAD_SYMBOL(cuvidMapVideoFrame, "cuvidMapVideoFrame64"); + LOAD_SYMBOL(cuvidUnmapVideoFrame, "cuvidUnmapVideoFrame64"); +#else + LOAD_SYMBOL(cuvidMapVideoFrame, "cuvidMapVideoFrame"); + LOAD_SYMBOL(cuvidUnmapVideoFrame, "cuvidUnmapVideoFrame"); +#endif + LOAD_SYMBOL(cuvidCtxLockCreate, "cuvidCtxLockCreate"); + LOAD_SYMBOL(cuvidCtxLockDestroy, "cuvidCtxLockDestroy"); + LOAD_SYMBOL(cuvidCtxLock, "cuvidCtxLock"); + LOAD_SYMBOL(cuvidCtxUnlock, "cuvidCtxUnlock"); + + LOAD_SYMBOL(cuvidCreateVideoSource, "cuvidCreateVideoSource"); + LOAD_SYMBOL(cuvidCreateVideoSourceW, "cuvidCreateVideoSourceW"); + LOAD_SYMBOL(cuvidDestroyVideoSource, "cuvidDestroyVideoSource"); + LOAD_SYMBOL(cuvidSetVideoSourceState, "cuvidSetVideoSourceState"); + LOAD_SYMBOL(cuvidGetVideoSourceState, "cuvidGetVideoSourceState"); + LOAD_SYMBOL(cuvidGetSourceVideoFormat, "cuvidGetSourceVideoFormat"); + LOAD_SYMBOL(cuvidGetSourceAudioFormat, "cuvidGetSourceAudioFormat"); + LOAD_SYMBOL(cuvidCreateVideoParser, "cuvidCreateVideoParser"); + LOAD_SYMBOL(cuvidParseVideoData, "cuvidParseVideoData"); + LOAD_SYMBOL(cuvidDestroyVideoParser, "cuvidDestroyVideoParser"); + + GENERIC_LOAD_FUNC_FINALE(cuvid); +} + +static inline int nvenc_load_functions(NvencFunctions **functions) +{ + GENERIC_LOAD_FUNC_PREAMBLE(NvencFunctions, nvenc, NVENC_LIBNAME); + + LOAD_SYMBOL(NvEncodeAPICreateInstance, "NvEncodeAPICreateInstance"); + LOAD_SYMBOL(NvEncodeAPIGetMaxSupportedVersion, "NvEncodeAPIGetMaxSupportedVersion"); + + GENERIC_LOAD_FUNC_FINALE(nvenc); +} + +#undef GENERIC_LOAD_FUNC_PREAMBLE +#undef LOAD_LIBRARY +#undef LOAD_SYMBOL +#undef GENERIC_LOAD_FUNC_FINALE +#undef GENERIC_FREE_FUNC +#undef CUDA_LIBNAME +#undef NVCUVID_LIBNAME +#undef NVENC_LIBNAME +#undef LIB_HANDLE + +#endif + diff --git 
a/compat/cuda/dynlink_nvcuvid.h b/compat/cuda/dynlink_nvcuvid.h new file mode 100644 index 0000000..53e0a7b --- /dev/null +++ b/compat/cuda/dynlink_nvcuvid.h @@ -0,0 +1,316 @@ +/* + * This copyright notice applies to this header file only: + * + * Copyright (c) 2010-2016 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the software, and to permit persons to whom the + * software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/** + * \file nvcuvid.h + * NvCuvid API provides Video Decoding interface to NVIDIA GPU devices. + * \date 2015-2015 + * This file contains the interface constants, structure definitions and function prototypes. + */ + +#if !defined(__NVCUVID_H__) +#define __NVCUVID_H__ + +#include "compat/cuda/dynlink_cuviddec.h" + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +//////////////////////////////////////////////////////////////////////////////////////////////// +// +// High-level helper APIs for video sources +// + +typedef void *CUvideosource; +typedef void *CUvideoparser; +typedef long long CUvideotimestamp; + +/** + * \addtogroup VIDEO_PARSER Video Parser + * @{ + */ + +/*! + * \enum cudaVideoState + * Video Source State + */ +typedef enum { + cudaVideoState_Error = -1, /**< Error state (invalid source) */ + cudaVideoState_Stopped = 0, /**< Source is stopped (or reached end-of-stream) */ + cudaVideoState_Started = 1 /**< Source is running and delivering data */ +} cudaVideoState; + +/*! + * \enum cudaAudioCodec + * Audio compression + */ +typedef enum { + cudaAudioCodec_MPEG1=0, /**< MPEG-1 Audio */ + cudaAudioCodec_MPEG2, /**< MPEG-2 Audio */ + cudaAudioCodec_MP3, /**< MPEG-1 Layer III Audio */ + cudaAudioCodec_AC3, /**< Dolby Digital (AC3) Audio */ + cudaAudioCodec_LPCM /**< PCM Audio */ +} cudaAudioCodec; + +/*! 
+ * \struct CUVIDEOFORMAT + * Video format + */ +typedef struct +{ + cudaVideoCodec codec; /**< Compression format */ + /** + * frame rate = numerator / denominator (for example: 30000/1001) + */ + struct { + unsigned int numerator; /**< frame rate numerator (0 = unspecified or variable frame rate) */ + unsigned int denominator; /**< frame rate denominator (0 = unspecified or variable frame rate) */ + } frame_rate; + unsigned char progressive_sequence; /**< 0=interlaced, 1=progressive */ + unsigned char bit_depth_luma_minus8; /**< high bit depth Luma */ + unsigned char bit_depth_chroma_minus8; /**< high bit depth Chroma */ + unsigned char reserved1; /**< Reserved for future use */ + unsigned int coded_width; /**< coded frame width */ + unsigned int coded_height; /**< coded frame height */ + /** + * area of the frame that should be displayed + * typical example: + * coded_width = 1920, coded_height = 1088 + * display_area = { 0,0,1920,1080 } + */ + struct { + int left; /**< left position of display rect */ + int top; /**< top position of display rect */ + int right; /**< right position of display rect */ + int bottom; /**< bottom position of display rect */ + } display_area; + cudaVideoChromaFormat chroma_format; /**< Chroma format */ + unsigned int bitrate; /**< video bitrate (bps, 0=unknown) */ + /** + * Display Aspect Ratio = x:y (4:3, 16:9, etc) + */ + struct { + int x; + int y; + } display_aspect_ratio; + /** + * Video Signal Description + */ + struct { + unsigned char video_format : 3; + unsigned char video_full_range_flag : 1; + unsigned char reserved_zero_bits : 4; + unsigned char color_primaries; + unsigned char transfer_characteristics; + unsigned char matrix_coefficients; + } video_signal_description; + unsigned int seqhdr_data_length; /**< Additional bytes following (CUVIDEOFORMATEX) */ +} CUVIDEOFORMAT; + +/*! + * \struct CUVIDEOFORMATEX + * Video format including raw sequence header information + */ +typedef struct +{ + CUVIDEOFORMAT format; + unsigned char raw_seqhdr_data[1024]; +} CUVIDEOFORMATEX; + +/*! + * \struct CUAUDIOFORMAT + * Audio Formats + */ +typedef struct +{ + cudaAudioCodec codec; /**< Compression format */ + unsigned int channels; /**< number of audio channels */ + unsigned int samplespersec; /**< sampling frequency */ + unsigned int bitrate; /**< For uncompressed, can also be used to determine bits per sample */ + unsigned int reserved1; /**< Reserved for future use */ + unsigned int reserved2; /**< Reserved for future use */ +} CUAUDIOFORMAT; + + +/*! + * \enum CUvideopacketflags + * Data packet flags + */ +typedef enum { + CUVID_PKT_ENDOFSTREAM = 0x01, /**< Set when this is the last packet for this stream */ + CUVID_PKT_TIMESTAMP = 0x02, /**< Timestamp is valid */ + CUVID_PKT_DISCONTINUITY = 0x04 /**< Set when a discontinuity has to be signalled */ +} CUvideopacketflags; + +/*! + * \struct CUVIDSOURCEDATAPACKET + * Data Packet + */ +typedef struct _CUVIDSOURCEDATAPACKET +{ + tcu_ulong flags; /**< Combination of CUVID_PKT_XXX flags */ + tcu_ulong payload_size; /**< number of bytes in the payload (may be zero if EOS flag is set) */ + const unsigned char *payload; /**< Pointer to packet payload data (may be NULL if EOS flag is set) */ + CUvideotimestamp timestamp; /**< Presentation timestamp (10MHz clock), only valid if CUVID_PKT_TIMESTAMP flag is set */ +} CUVIDSOURCEDATAPACKET; + +// Callback for packet delivery +typedef int (CUDAAPI *PFNVIDSOURCECALLBACK)(void *, CUVIDSOURCEDATAPACKET *); + +/*! 
+ * \struct CUVIDSOURCEPARAMS + * Source Params + */ +typedef struct _CUVIDSOURCEPARAMS +{ + unsigned int ulClockRate; /**< Timestamp units in Hz (0=default=10000000Hz) */ + unsigned int uReserved1[7]; /**< Reserved for future use - set to zero */ + void *pUserData; /**< Parameter passed in to the data handlers */ + PFNVIDSOURCECALLBACK pfnVideoDataHandler; /**< Called to deliver video packets */ + PFNVIDSOURCECALLBACK pfnAudioDataHandler; /**< Called to deliver audio packets */ + void *pvReserved2[8]; /**< Reserved for future use - set to NULL */ +} CUVIDSOURCEPARAMS; + +/*! + * \enum CUvideosourceformat_flags + * CUvideosourceformat_flags + */ +typedef enum { + CUVID_FMT_EXTFORMATINFO = 0x100 /**< Return extended format structure (CUVIDEOFORMATEX) */ +} CUvideosourceformat_flags; + +#if !defined(__APPLE__) +/** + * \fn CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams) + * Create Video Source + */ +typedef CUresult CUDAAPI tcuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams); + +/** + * \fn CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams) + * Create Video Source + */ +typedef CUresult CUDAAPI tcuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams); + +/** + * \fn CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj) + * Destroy Video Source + */ +typedef CUresult CUDAAPI tcuvidDestroyVideoSource(CUvideosource obj); + +/** + * \fn CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state) + * Set Video Source state + */ +typedef CUresult CUDAAPI tcuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state); + +/** + * \fn cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj) + * Get Video Source state + */ +typedef cudaVideoState CUDAAPI tcuvidGetVideoSourceState(CUvideosource obj); + +/** + * \fn CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags) + * Get Video Source Format + */ +typedef CUresult CUDAAPI tcuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags); + +/** + * \fn CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags) + * Get Audio Source Format + */ +typedef CUresult CUDAAPI tcuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags); + +#endif + +/** + * \struct CUVIDPARSERDISPINFO + */ +typedef struct _CUVIDPARSERDISPINFO +{ + int picture_index; /**< */ + int progressive_frame; /**< */ + int top_field_first; /**< */ + int repeat_first_field; /**< Number of additional fields (1=ivtc, 2=frame doubling, 4=frame tripling, -1=unpaired field) */ + CUvideotimestamp timestamp; /**< */ +} CUVIDPARSERDISPINFO; + +// +// Parser callbacks +// The parser will call these synchronously from within cuvidParseVideoData(), whenever a picture is ready to +// be decoded and/or displayed. 
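+//
+// As an illustrative sketch (error checking omitted; 'parser', 'data', 'size' and 'pts' are placeholders
+// provided by the client), one packet is typically submitted per demuxed frame:
+//
+//     CUVIDSOURCEDATAPACKET pkt = { 0 };
+//     pkt.payload_size = size;              // number of bitstream bytes for this frame
+//     pkt.payload      = data;
+//     pkt.flags        = CUVID_PKT_TIMESTAMP;
+//     pkt.timestamp    = pts;               // 10MHz units unless ulClockRate specifies otherwise
+//     cuvidParseVideoData(parser, &pkt);    // invokes the callbacks below as pictures become ready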
+// +typedef int (CUDAAPI *PFNVIDSEQUENCECALLBACK)(void *, CUVIDEOFORMAT *); +typedef int (CUDAAPI *PFNVIDDECODECALLBACK)(void *, CUVIDPICPARAMS *); +typedef int (CUDAAPI *PFNVIDDISPLAYCALLBACK)(void *, CUVIDPARSERDISPINFO *); + +/** + * \struct CUVIDPARSERPARAMS + */ +typedef struct _CUVIDPARSERPARAMS +{ + cudaVideoCodec CodecType; /**< cudaVideoCodec_XXX */ + unsigned int ulMaxNumDecodeSurfaces; /**< Max # of decode surfaces (parser will cycle through these) */ + unsigned int ulClockRate; /**< Timestamp units in Hz (0=default=10000000Hz) */ + unsigned int ulErrorThreshold; /**< % Error threshold (0-100) for calling pfnDecodePicture (100=always call pfnDecodePicture even if picture bitstream is fully corrupted) */ + unsigned int ulMaxDisplayDelay; /**< Max display queue delay (improves pipelining of decode with display) - 0=no delay (recommended values: 2..4) */ + unsigned int uReserved1[5]; /**< Reserved for future use - set to 0 */ + void *pUserData; /**< User data for callbacks */ + PFNVIDSEQUENCECALLBACK pfnSequenceCallback; /**< Called before decoding frames and/or whenever there is a format change */ + PFNVIDDECODECALLBACK pfnDecodePicture; /**< Called when a picture is ready to be decoded (decode order) */ + PFNVIDDISPLAYCALLBACK pfnDisplayPicture; /**< Called whenever a picture is ready to be displayed (display order) */ + void *pvReserved2[7]; /**< Reserved for future use - set to NULL */ + CUVIDEOFORMATEX *pExtVideoInfo; /**< [Optional] sequence header data from system layer */ +} CUVIDPARSERPARAMS; + +/** + * \fn CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams) + */ +typedef CUresult CUDAAPI tcuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams); + +/** + * \fn CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket) + */ +typedef CUresult CUDAAPI tcuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket); + +/** + * \fn CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj) + */ +typedef CUresult CUDAAPI tcuvidDestroyVideoParser(CUvideoparser obj); + +/** @} */ /* END VIDEO_PARSER */ +//////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif // __NVCUVID_H__ + + diff --git a/compat/dispatch_semaphore/semaphore.h b/compat/dispatch_semaphore/semaphore.h index 2461daf..008c637 100644 --- a/compat/dispatch_semaphore/semaphore.h +++ b/compat/dispatch_semaphore/semaphore.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/compat/float/float.h b/compat/float/float.h index c69f728..1f0d3ab 100644 --- a/compat/float/float.h +++ b/compat/float/float.h @@ -1,20 +1,20 @@ /* * Work around broken floating point limits on some systems. * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/compat/float/limits.h b/compat/float/limits.h index 9150bc8..7ea374a 100644 --- a/compat/float/limits.h +++ b/compat/float/limits.h @@ -1,20 +1,20 @@ /* * Work around broken floating point limits on some systems. * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/compat/getopt.c b/compat/getopt.c index b7adf60..41a641f 100644 --- a/compat/getopt.c +++ b/compat/getopt.c @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -44,7 +44,7 @@ static int getopt(int argc, char *argv[], char *opts) int c; char *cp; - if (sp == 1) + if (sp == 1) { if (optind >= argc || argv[optind][0] != '-' || argv[optind][1] == '\0') return EOF; @@ -52,6 +52,7 @@ static int getopt(int argc, char *argv[], char *opts) optind++; return EOF; } + } optopt = c = argv[optind][sp]; if (c == ':' || !(cp = strchr(opts, c))) { fprintf(stderr, ": illegal option -- %c\n", c); diff --git a/compat/msvcrt/snprintf.c b/compat/msvcrt/snprintf.c index 0af7b54..c64653f 100644 --- a/compat/msvcrt/snprintf.c +++ b/compat/msvcrt/snprintf.c @@ -2,20 +2,20 @@ * C99-compatible snprintf() and vsnprintf() implementations * Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -24,10 +24,11 @@ #include <limits.h> #include <string.h> +#include "compat/va_copy.h" #include "libavutil/error.h" -#if !defined(va_copy) && defined(_MSC_VER) -#define va_copy(dst, src) ((dst) = (src)) +#if defined(__MINGW32__) +#define EOVERFLOW EFBIG #endif int avpriv_snprintf(char *s, size_t n, const char *fmt, ...) diff --git a/compat/msvcrt/snprintf.h b/compat/msvcrt/snprintf.h new file mode 100644 index 0000000..cd47953 --- /dev/null +++ b/compat/msvcrt/snprintf.h @@ -0,0 +1,38 @@ +/* + * C99-compatible snprintf() and vsnprintf() implementations + * Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef COMPAT_MSVCRT_SNPRINTF_H +#define COMPAT_MSVCRT_SNPRINTF_H + +#include <stdarg.h> +#include <stdio.h> + +int avpriv_snprintf(char *s, size_t n, const char *fmt, ...); +int avpriv_vsnprintf(char *s, size_t n, const char *fmt, va_list ap); + +#undef snprintf +#undef _snprintf +#undef vsnprintf +#define snprintf avpriv_snprintf +#define _snprintf avpriv_snprintf +#define vsnprintf avpriv_vsnprintf + +#endif /* COMPAT_MSVCRT_SNPRINTF_H */ diff --git a/compat/nvenc/nvEncodeAPI.h b/compat/nvenc/nvEncodeAPI.h new file mode 100644 index 0000000..18cf522 --- /dev/null +++ b/compat/nvenc/nvEncodeAPI.h @@ -0,0 +1,3219 @@ +/* + * This copyright notice applies to this header file only: + * + * Copyright (c) 2010-2015 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the software, and to permit persons to whom the + * software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/** + * \file nvEncodeAPI.h + * NvEncodeAPI provides a NVENC Video Encoding interface to NVIDIA GPU devices based on the Kepler architecture. + * \date 2011-2016 + * This file contains the interface constants, structure definitions and function prototypes. + */ + +#ifndef _NV_ENCODEAPI_H_ +#define _NV_ENCODEAPI_H_ + +#include <stdlib.h> + +#ifdef _WIN32 +#include <windows.h> +#endif + +#ifdef _MSC_VER +#ifndef _STDINT +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; +typedef unsigned short uint16_t; +#endif +#else +#include <stdint.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \addtogroup ENCODER_STRUCTURE NvEncodeAPI Data structures + * @{ + */ + +#if defined(_WIN32) || defined(__CYGWIN__) +#define NVENCAPI __stdcall +#else +#define NVENCAPI +#endif + +#ifdef _WIN32 +typedef RECT NVENC_RECT; +#else +// ========================================================================================= +#ifndef GUID +/*! + * \struct GUID + * Abstracts the GUID structure for non-windows platforms. + */ +// ========================================================================================= +typedef struct +{ + uint32_t Data1; /**< [in]: Specifies the first 8 hexadecimal digits of the GUID. */ + uint16_t Data2; /**< [in]: Specifies the first group of 4 hexadecimal digits. 
*/ + uint16_t Data3; /**< [in]: Specifies the second group of 4 hexadecimal digits. */ + uint8_t Data4[8]; /**< [in]: Array of 8 bytes. The first 2 bytes contain the third group of 4 hexadecimal digits. + The remaining 6 bytes contain the final 12 hexadecimal digits. */ +} GUID; +#endif // GUID + +/** + * \struct _NVENC_RECT + * Defines a Rectangle. Used in ::NV_ENC_PREPROCESS_FRAME. + */ +typedef struct _NVENC_RECT +{ + uint32_t left; /**< [in]: X coordinate of the upper left corner of rectangular area to be specified. */ + uint32_t top; /**< [in]: Y coordinate of the upper left corner of the rectangular area to be specified. */ + uint32_t right; /**< [in]: X coordinate of the bottom right corner of the rectangular area to be specified. */ + uint32_t bottom; /**< [in]: Y coordinate of the bottom right corner of the rectangular area to be specified. */ +} NVENC_RECT; + +#endif // _WIN32 + +/** @} */ /* End of GUID and NVENC_RECT structure grouping*/ + +typedef void* NV_ENC_INPUT_PTR; /**< NVENCODE API input buffer */ +typedef void* NV_ENC_OUTPUT_PTR; /**< NVENCODE API output buffer*/ +typedef void* NV_ENC_REGISTERED_PTR; /**< A Resource that has been registered with NVENCODE API*/ + +#define NVENCAPI_MAJOR_VERSION 7 +#define NVENCAPI_MINOR_VERSION 0 + +#define NVENCAPI_VERSION (NVENCAPI_MAJOR_VERSION | (NVENCAPI_MINOR_VERSION << 24)) + +/** + * Macro to generate per-structure version for use with API. + */ +#define NVENCAPI_STRUCT_VERSION(ver) ((uint32_t)NVENCAPI_VERSION | ((ver)<<16) | (0x7 << 28)) + + +#define NVENC_INFINITE_GOPLENGTH 0xffffffff + +#define NV_MAX_SEQ_HDR_LEN (512) + +// ========================================================================================= +// Encode Codec GUIDS supported by the NvEncodeAPI interface. +// ========================================================================================= + +// {6BC82762-4E63-4ca4-AA85-1E50F321F6BF} +static const GUID NV_ENC_CODEC_H264_GUID = +{ 0x6bc82762, 0x4e63, 0x4ca4, { 0xaa, 0x85, 0x1e, 0x50, 0xf3, 0x21, 0xf6, 0xbf } }; + +// {790CDC88-4522-4d7b-9425-BDA9975F7603} +static const GUID NV_ENC_CODEC_HEVC_GUID = +{ 0x790cdc88, 0x4522, 0x4d7b, { 0x94, 0x25, 0xbd, 0xa9, 0x97, 0x5f, 0x76, 0x3 } }; + + + +// ========================================================================================= +// * Encode Profile GUIDS supported by the NvEncodeAPI interface. 
+// ========================================================================================= + +// {BFD6F8E7-233C-4341-8B3E-4818523803F4} +static const GUID NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID = +{ 0xbfd6f8e7, 0x233c, 0x4341, { 0x8b, 0x3e, 0x48, 0x18, 0x52, 0x38, 0x3, 0xf4 } }; + +// {0727BCAA-78C4-4c83-8C2F-EF3DFF267C6A} +static const GUID NV_ENC_H264_PROFILE_BASELINE_GUID = +{ 0x727bcaa, 0x78c4, 0x4c83, { 0x8c, 0x2f, 0xef, 0x3d, 0xff, 0x26, 0x7c, 0x6a } }; + +// {60B5C1D4-67FE-4790-94D5-C4726D7B6E6D} +static const GUID NV_ENC_H264_PROFILE_MAIN_GUID = +{ 0x60b5c1d4, 0x67fe, 0x4790, { 0x94, 0xd5, 0xc4, 0x72, 0x6d, 0x7b, 0x6e, 0x6d } }; + +// {E7CBC309-4F7A-4b89-AF2A-D537C92BE310} +static const GUID NV_ENC_H264_PROFILE_HIGH_GUID = +{ 0xe7cbc309, 0x4f7a, 0x4b89, { 0xaf, 0x2a, 0xd5, 0x37, 0xc9, 0x2b, 0xe3, 0x10 } }; + +// {7AC663CB-A598-4960-B844-339B261A7D52} +static const GUID NV_ENC_H264_PROFILE_HIGH_444_GUID = +{ 0x7ac663cb, 0xa598, 0x4960, { 0xb8, 0x44, 0x33, 0x9b, 0x26, 0x1a, 0x7d, 0x52 } }; + +// {40847BF5-33F7-4601-9084-E8FE3C1DB8B7} +static const GUID NV_ENC_H264_PROFILE_STEREO_GUID = +{ 0x40847bf5, 0x33f7, 0x4601, { 0x90, 0x84, 0xe8, 0xfe, 0x3c, 0x1d, 0xb8, 0xb7 } }; + +// {CE788D20-AAA9-4318-92BB-AC7E858C8D36} +static const GUID NV_ENC_H264_PROFILE_SVC_TEMPORAL_SCALABILTY = +{ 0xce788d20, 0xaaa9, 0x4318, { 0x92, 0xbb, 0xac, 0x7e, 0x85, 0x8c, 0x8d, 0x36 } }; + +// {B405AFAC-F32B-417B-89C4-9ABEED3E5978} +static const GUID NV_ENC_H264_PROFILE_PROGRESSIVE_HIGH_GUID = +{ 0xb405afac, 0xf32b, 0x417b, { 0x89, 0xc4, 0x9a, 0xbe, 0xed, 0x3e, 0x59, 0x78 } }; + +// {AEC1BD87-E85B-48f2-84C3-98BCA6285072} +static const GUID NV_ENC_H264_PROFILE_CONSTRAINED_HIGH_GUID = +{ 0xaec1bd87, 0xe85b, 0x48f2, { 0x84, 0xc3, 0x98, 0xbc, 0xa6, 0x28, 0x50, 0x72 } }; + +// {B514C39A-B55B-40fa-878F-F1253B4DFDEC} +static const GUID NV_ENC_HEVC_PROFILE_MAIN_GUID = +{ 0xb514c39a, 0xb55b, 0x40fa, { 0x87, 0x8f, 0xf1, 0x25, 0x3b, 0x4d, 0xfd, 0xec } }; + +// {fa4d2b6c-3a5b-411a-8018-0a3f5e3c9be5} +static const GUID NV_ENC_HEVC_PROFILE_MAIN10_GUID = +{ 0xfa4d2b6c, 0x3a5b, 0x411a, { 0x80, 0x18, 0x0a, 0x3f, 0x5e, 0x3c, 0x9b, 0xe5 } }; + +// For HEVC Main 444 8 bit and HEVC Main 444 10 bit profiles only +// {51ec32b5-1b4c-453c-9cbd-b616bd621341} +static const GUID NV_ENC_HEVC_PROFILE_FREXT_GUID = +{ 0x51ec32b5, 0x1b4c, 0x453c, { 0x9c, 0xbd, 0xb6, 0x16, 0xbd, 0x62, 0x13, 0x41 } }; + +// ========================================================================================= +// * Preset GUIDS supported by the NvEncodeAPI interface. 
+// ========================================================================================= +// {B2DFB705-4EBD-4C49-9B5F-24A777D3E587} +static const GUID NV_ENC_PRESET_DEFAULT_GUID = +{ 0xb2dfb705, 0x4ebd, 0x4c49, { 0x9b, 0x5f, 0x24, 0xa7, 0x77, 0xd3, 0xe5, 0x87 } }; + +// {60E4C59F-E846-4484-A56D-CD45BE9FDDF6} +static const GUID NV_ENC_PRESET_HP_GUID = +{ 0x60e4c59f, 0xe846, 0x4484, { 0xa5, 0x6d, 0xcd, 0x45, 0xbe, 0x9f, 0xdd, 0xf6 } }; + +// {34DBA71D-A77B-4B8F-9C3E-B6D5DA24C012} +static const GUID NV_ENC_PRESET_HQ_GUID = +{ 0x34dba71d, 0xa77b, 0x4b8f, { 0x9c, 0x3e, 0xb6, 0xd5, 0xda, 0x24, 0xc0, 0x12 } }; + +// {82E3E450-BDBB-4e40-989C-82A90DF9EF32} +static const GUID NV_ENC_PRESET_BD_GUID = +{ 0x82e3e450, 0xbdbb, 0x4e40, { 0x98, 0x9c, 0x82, 0xa9, 0xd, 0xf9, 0xef, 0x32 } }; + +// {49DF21C5-6DFA-4feb-9787-6ACC9EFFB726} +static const GUID NV_ENC_PRESET_LOW_LATENCY_DEFAULT_GUID = +{ 0x49df21c5, 0x6dfa, 0x4feb, { 0x97, 0x87, 0x6a, 0xcc, 0x9e, 0xff, 0xb7, 0x26 } }; + +// {C5F733B9-EA97-4cf9-BEC2-BF78A74FD105} +static const GUID NV_ENC_PRESET_LOW_LATENCY_HQ_GUID = +{ 0xc5f733b9, 0xea97, 0x4cf9, { 0xbe, 0xc2, 0xbf, 0x78, 0xa7, 0x4f, 0xd1, 0x5 } }; + +// {67082A44-4BAD-48FA-98EA-93056D150A58} +static const GUID NV_ENC_PRESET_LOW_LATENCY_HP_GUID = +{ 0x67082a44, 0x4bad, 0x48fa, { 0x98, 0xea, 0x93, 0x5, 0x6d, 0x15, 0xa, 0x58 } }; + +// {D5BFB716-C604-44e7-9BB8-DEA5510FC3AC} +static const GUID NV_ENC_PRESET_LOSSLESS_DEFAULT_GUID = +{ 0xd5bfb716, 0xc604, 0x44e7, { 0x9b, 0xb8, 0xde, 0xa5, 0x51, 0xf, 0xc3, 0xac } }; + +// {149998E7-2364-411d-82EF-179888093409} +static const GUID NV_ENC_PRESET_LOSSLESS_HP_GUID = +{ 0x149998e7, 0x2364, 0x411d, { 0x82, 0xef, 0x17, 0x98, 0x88, 0x9, 0x34, 0x9 } }; + +/** + * \addtogroup ENCODER_STRUCTURE NvEncodeAPI Data structures + * @{ + */ + +/** + * Input frame encode modes + */ +typedef enum _NV_ENC_PARAMS_FRAME_FIELD_MODE +{ + NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME = 0x01, /**< Frame mode */ + NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD = 0x02, /**< Field mode */ + NV_ENC_PARAMS_FRAME_FIELD_MODE_MBAFF = 0x03 /**< MB adaptive frame/field */ +} NV_ENC_PARAMS_FRAME_FIELD_MODE; + +/** + * Rate Control Modes + */ +typedef enum _NV_ENC_PARAMS_RC_MODE +{ + NV_ENC_PARAMS_RC_CONSTQP = 0x0, /**< Constant QP mode */ + NV_ENC_PARAMS_RC_VBR = 0x1, /**< Variable bitrate mode */ + NV_ENC_PARAMS_RC_CBR = 0x2, /**< Constant bitrate mode */ + NV_ENC_PARAMS_RC_VBR_MINQP = 0x4, /**< Variable bitrate mode with MinQP */ + NV_ENC_PARAMS_RC_2_PASS_QUALITY = 0x8, /**< Multi pass encoding optimized for image quality and works only with low latency mode */ + NV_ENC_PARAMS_RC_2_PASS_FRAMESIZE_CAP = 0x10, /**< Multi pass encoding optimized for maintaining frame size and works only with low latency mode */ + NV_ENC_PARAMS_RC_2_PASS_VBR = 0x20 /**< Multi pass VBR */ +} NV_ENC_PARAMS_RC_MODE; + +#define NV_ENC_PARAMS_RC_CBR2 NV_ENC_PARAMS_RC_CBR /**< Deprecated */ + +/** + * Input picture structure + */ +typedef enum _NV_ENC_PIC_STRUCT +{ + NV_ENC_PIC_STRUCT_FRAME = 0x01, /**< Progressive frame */ + NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM = 0x02, /**< Field encoding top field first */ + NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP = 0x03 /**< Field encoding bottom field first */ +} NV_ENC_PIC_STRUCT; + +/** + * Input picture type + */ +typedef enum _NV_ENC_PIC_TYPE +{ + NV_ENC_PIC_TYPE_P = 0x0, /**< Forward predicted */ + NV_ENC_PIC_TYPE_B = 0x01, /**< Bi-directionally predicted picture */ + NV_ENC_PIC_TYPE_I = 0x02, /**< Intra predicted picture */ + NV_ENC_PIC_TYPE_IDR = 0x03, /**< IDR picture */ + NV_ENC_PIC_TYPE_BI = 
0x04, /**< Bi-directionally predicted with only Intra MBs */ + NV_ENC_PIC_TYPE_SKIPPED = 0x05, /**< Picture is skipped */ + NV_ENC_PIC_TYPE_INTRA_REFRESH = 0x06, /**< First picture in intra refresh cycle */ + NV_ENC_PIC_TYPE_UNKNOWN = 0xFF /**< Picture type unknown */ +} NV_ENC_PIC_TYPE; + +/** + * Motion vector precisions + */ +typedef enum _NV_ENC_MV_PRECISION +{ + NV_ENC_MV_PRECISION_DEFAULT = 0x0, /**<Driver selects QuarterPel motion vector precision by default*/ + NV_ENC_MV_PRECISION_FULL_PEL = 0x01, /**< FullPel motion vector precision */ + NV_ENC_MV_PRECISION_HALF_PEL = 0x02, /**< HalfPel motion vector precision */ + NV_ENC_MV_PRECISION_QUARTER_PEL = 0x03 /**< QuarterPel motion vector precision */ +} NV_ENC_MV_PRECISION; + + +/** + * Input buffer formats + */ +typedef enum _NV_ENC_BUFFER_FORMAT +{ + NV_ENC_BUFFER_FORMAT_UNDEFINED = 0x00000000, /**< Undefined buffer format */ + + NV_ENC_BUFFER_FORMAT_NV12 = 0x00000001, /**< Semi-Planar YUV [Y plane followed by interleaved UV plane] */ + NV_ENC_BUFFER_FORMAT_YV12 = 0x00000010, /**< Planar YUV [Y plane followed by V and U planes] */ + NV_ENC_BUFFER_FORMAT_IYUV = 0x00000100, /**< Planar YUV [Y plane followed by U and V planes] */ + NV_ENC_BUFFER_FORMAT_YUV444 = 0x00001000, /**< Planar YUV [Y plane followed by U and V planes] */ + NV_ENC_BUFFER_FORMAT_YUV420_10BIT = 0x00010000, /**< 10 bit Semi-Planar YUV [Y plane followed by interleaved UV plane]. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */ + NV_ENC_BUFFER_FORMAT_YUV444_10BIT = 0x00100000, /**< 10 bit Planar YUV444 [Y plane followed by U and V planes]. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */ + NV_ENC_BUFFER_FORMAT_ARGB = 0x01000000, /**< 8 bit Packed A8R8G8B8 */ + NV_ENC_BUFFER_FORMAT_ARGB10 = 0x02000000, /**< 10 bit Packed A2R10G10B10. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */ + NV_ENC_BUFFER_FORMAT_AYUV = 0x04000000, /**< 8 bit Packed A8Y8U8V8 */ + NV_ENC_BUFFER_FORMAT_ABGR = 0x10000000, /**< 8 bit Packed A8B8G8R8 */ + NV_ENC_BUFFER_FORMAT_ABGR10 = 0x20000000, /**< 10 bit Packed A2B10G10R10. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. 
*/ +} NV_ENC_BUFFER_FORMAT; + +#define NV_ENC_BUFFER_FORMAT_NV12_PL NV_ENC_BUFFER_FORMAT_NV12 +#define NV_ENC_BUFFER_FORMAT_YV12_PL NV_ENC_BUFFER_FORMAT_YV12 +#define NV_ENC_BUFFER_FORMAT_IYUV_PL NV_ENC_BUFFER_FORMAT_IYUV +#define NV_ENC_BUFFER_FORMAT_YUV444_PL NV_ENC_BUFFER_FORMAT_YUV444 + +/** + * Encoding levels + */ +typedef enum _NV_ENC_LEVEL +{ + NV_ENC_LEVEL_AUTOSELECT = 0, + + NV_ENC_LEVEL_H264_1 = 10, + NV_ENC_LEVEL_H264_1b = 9, + NV_ENC_LEVEL_H264_11 = 11, + NV_ENC_LEVEL_H264_12 = 12, + NV_ENC_LEVEL_H264_13 = 13, + NV_ENC_LEVEL_H264_2 = 20, + NV_ENC_LEVEL_H264_21 = 21, + NV_ENC_LEVEL_H264_22 = 22, + NV_ENC_LEVEL_H264_3 = 30, + NV_ENC_LEVEL_H264_31 = 31, + NV_ENC_LEVEL_H264_32 = 32, + NV_ENC_LEVEL_H264_4 = 40, + NV_ENC_LEVEL_H264_41 = 41, + NV_ENC_LEVEL_H264_42 = 42, + NV_ENC_LEVEL_H264_5 = 50, + NV_ENC_LEVEL_H264_51 = 51, + NV_ENC_LEVEL_H264_52 = 52, + + + NV_ENC_LEVEL_HEVC_1 = 30, + NV_ENC_LEVEL_HEVC_2 = 60, + NV_ENC_LEVEL_HEVC_21 = 63, + NV_ENC_LEVEL_HEVC_3 = 90, + NV_ENC_LEVEL_HEVC_31 = 93, + NV_ENC_LEVEL_HEVC_4 = 120, + NV_ENC_LEVEL_HEVC_41 = 123, + NV_ENC_LEVEL_HEVC_5 = 150, + NV_ENC_LEVEL_HEVC_51 = 153, + NV_ENC_LEVEL_HEVC_52 = 156, + NV_ENC_LEVEL_HEVC_6 = 180, + NV_ENC_LEVEL_HEVC_61 = 183, + NV_ENC_LEVEL_HEVC_62 = 186, + + NV_ENC_TIER_HEVC_MAIN = 0, + NV_ENC_TIER_HEVC_HIGH = 1 +} NV_ENC_LEVEL; + +/** + * Error Codes + */ +typedef enum _NVENCSTATUS +{ + /** + * This indicates that API call returned with no errors. + */ + NV_ENC_SUCCESS, + + /** + * This indicates that no encode capable devices were detected. + */ + NV_ENC_ERR_NO_ENCODE_DEVICE, + + /** + * This indicates that devices pass by the client is not supported. + */ + NV_ENC_ERR_UNSUPPORTED_DEVICE, + + /** + * This indicates that the encoder device supplied by the client is not + * valid. + */ + NV_ENC_ERR_INVALID_ENCODERDEVICE, + + /** + * This indicates that device passed to the API call is invalid. + */ + NV_ENC_ERR_INVALID_DEVICE, + + /** + * This indicates that device passed to the API call is no longer available and + * needs to be reinitialized. The clients need to destroy the current encoder + * session by freeing the allocated input output buffers and destroying the device + * and create a new encoding session. + */ + NV_ENC_ERR_DEVICE_NOT_EXIST, + + /** + * This indicates that one or more of the pointers passed to the API call + * is invalid. + */ + NV_ENC_ERR_INVALID_PTR, + + /** + * This indicates that completion event passed in ::NvEncEncodePicture() call + * is invalid. + */ + NV_ENC_ERR_INVALID_EVENT, + + /** + * This indicates that one or more of the parameter passed to the API call + * is invalid. + */ + NV_ENC_ERR_INVALID_PARAM, + + /** + * This indicates that an API call was made in wrong sequence/order. + */ + NV_ENC_ERR_INVALID_CALL, + + /** + * This indicates that the API call failed because it was unable to allocate + * enough memory to perform the requested operation. + */ + NV_ENC_ERR_OUT_OF_MEMORY, + + /** + * This indicates that the encoder has not been initialized with + * ::NvEncInitializeEncoder() or that initialization has failed. + * The client cannot allocate input or output buffers or do any encoding + * related operation before successfully initializing the encoder. + */ + NV_ENC_ERR_ENCODER_NOT_INITIALIZED, + + /** + * This indicates that an unsupported parameter was passed by the client. + */ + NV_ENC_ERR_UNSUPPORTED_PARAM, + + /** + * This indicates that the ::NvEncLockBitstream() failed to lock the output + * buffer. 
This happens when the client makes a non blocking lock call to + * access the output bitstream by passing NV_ENC_LOCK_BITSTREAM::doNotWait flag. + * This is not a fatal error and client should retry the same operation after + * few milliseconds. + */ + NV_ENC_ERR_LOCK_BUSY, + + /** + * This indicates that the size of the user buffer passed by the client is + * insufficient for the requested operation. + */ + NV_ENC_ERR_NOT_ENOUGH_BUFFER, + + /** + * This indicates that an invalid struct version was used by the client. + */ + NV_ENC_ERR_INVALID_VERSION, + + /** + * This indicates that ::NvEncMapInputResource() API failed to map the client + * provided input resource. + */ + NV_ENC_ERR_MAP_FAILED, + + /** + * This indicates encode driver requires more input buffers to produce an output + * bitstream. If this error is returned from ::NvEncEncodePicture() API, this + * is not a fatal error. If the client is encoding with B frames then, + * ::NvEncEncodePicture() API might be buffering the input frame for re-ordering. + * + * A client operating in synchronous mode cannot call ::NvEncLockBitstream() + * API on the output bitstream buffer if ::NvEncEncodePicture() returned the + * ::NV_ENC_ERR_NEED_MORE_INPUT error code. + * The client must continue providing input frames until encode driver returns + * ::NV_ENC_SUCCESS. After receiving ::NV_ENC_SUCCESS status the client can call + * ::NvEncLockBitstream() API on the output buffers in the same order in which + * it has called ::NvEncEncodePicture(). + */ + NV_ENC_ERR_NEED_MORE_INPUT, + + /** + * This indicates that the HW encoder is busy encoding and is unable to encode + * the input. The client should call ::NvEncEncodePicture() again after few + * milliseconds. + */ + NV_ENC_ERR_ENCODER_BUSY, + + /** + * This indicates that the completion event passed in ::NvEncEncodePicture() + * API has not been registered with encoder driver using ::NvEncRegisterAsyncEvent(). + */ + NV_ENC_ERR_EVENT_NOT_REGISTERD, + + /** + * This indicates that an unknown internal error has occurred. + */ + NV_ENC_ERR_GENERIC, + + /** + * This indicates that the client is attempting to use a feature + * that is not available for the license type for the current system. + */ + NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY, + + /** + * This indicates that the client is attempting to use a feature + * that is not implemented for the current version. + */ + NV_ENC_ERR_UNIMPLEMENTED, + + /** + * This indicates that the ::NvEncRegisterResource API failed to register the resource. + */ + NV_ENC_ERR_RESOURCE_REGISTER_FAILED, + + /** + * This indicates that the client is attempting to unregister a resource + * that has not been successfully registered. + */ + NV_ENC_ERR_RESOURCE_NOT_REGISTERED, + + /** + * This indicates that the client is attempting to unmap a resource + * that has not been successfully mapped. + */ + NV_ENC_ERR_RESOURCE_NOT_MAPPED, + +} NVENCSTATUS; + +/** + * Encode Picture encode flags. + */ +typedef enum _NV_ENC_PIC_FLAGS +{ + NV_ENC_PIC_FLAG_FORCEINTRA = 0x1, /**< Encode the current picture as an Intra picture */ + NV_ENC_PIC_FLAG_FORCEIDR = 0x2, /**< Encode the current picture as an IDR picture. + This flag is only valid when Picture type decision is taken by the Encoder + [_NV_ENC_INITIALIZE_PARAMS::enablePTD == 1]. 
*/ + NV_ENC_PIC_FLAG_OUTPUT_SPSPPS = 0x4, /**< Write the sequence and picture header in encoded bitstream of the current picture */ + NV_ENC_PIC_FLAG_EOS = 0x8, /**< Indicates end of the input stream */ +} NV_ENC_PIC_FLAGS; + +/** + * Memory heap to allocate input and output buffers. + */ +typedef enum _NV_ENC_MEMORY_HEAP +{ + NV_ENC_MEMORY_HEAP_AUTOSELECT = 0, /**< Memory heap to be decided by the encoder driver based on the usage */ + NV_ENC_MEMORY_HEAP_VID = 1, /**< Memory heap is in local video memory */ + NV_ENC_MEMORY_HEAP_SYSMEM_CACHED = 2, /**< Memory heap is in cached system memory */ + NV_ENC_MEMORY_HEAP_SYSMEM_UNCACHED = 3 /**< Memory heap is in uncached system memory */ +} NV_ENC_MEMORY_HEAP; + + +/** + * H.264 entropy coding modes. + */ +typedef enum _NV_ENC_H264_ENTROPY_CODING_MODE +{ + NV_ENC_H264_ENTROPY_CODING_MODE_AUTOSELECT = 0x0, /**< Entropy coding mode is auto selected by the encoder driver */ + NV_ENC_H264_ENTROPY_CODING_MODE_CABAC = 0x1, /**< Entropy coding mode is CABAC */ + NV_ENC_H264_ENTROPY_CODING_MODE_CAVLC = 0x2 /**< Entropy coding mode is CAVLC */ +} NV_ENC_H264_ENTROPY_CODING_MODE; + +/** + * H.264 specific Bdirect modes + */ +typedef enum _NV_ENC_H264_BDIRECT_MODE +{ + NV_ENC_H264_BDIRECT_MODE_AUTOSELECT = 0x0, /**< BDirect mode is auto selected by the encoder driver */ + NV_ENC_H264_BDIRECT_MODE_DISABLE = 0x1, /**< Disable BDirect mode */ + NV_ENC_H264_BDIRECT_MODE_TEMPORAL = 0x2, /**< Temporal BDirect mode */ + NV_ENC_H264_BDIRECT_MODE_SPATIAL = 0x3 /**< Spatial BDirect mode */ +} NV_ENC_H264_BDIRECT_MODE; + +/** + * H.264 specific FMO usage + */ +typedef enum _NV_ENC_H264_FMO_MODE +{ + NV_ENC_H264_FMO_AUTOSELECT = 0x0, /**< FMO usage is auto selected by the encoder driver */ + NV_ENC_H264_FMO_ENABLE = 0x1, /**< Enable FMO */ + NV_ENC_H264_FMO_DISABLE = 0x2, /**< Disble FMO */ +} NV_ENC_H264_FMO_MODE; + +/** + * H.264 specific Adaptive Transform modes + */ +typedef enum _NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE +{ + NV_ENC_H264_ADAPTIVE_TRANSFORM_AUTOSELECT = 0x0, /**< Adaptive Transform 8x8 mode is auto selected by the encoder driver*/ + NV_ENC_H264_ADAPTIVE_TRANSFORM_DISABLE = 0x1, /**< Adaptive Transform 8x8 mode disabled */ + NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE = 0x2, /**< Adaptive Transform 8x8 mode should be used */ +} NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE; + +/** + * Stereo frame packing modes. 
+ */ +typedef enum _NV_ENC_STEREO_PACKING_MODE +{ + NV_ENC_STEREO_PACKING_MODE_NONE = 0x0, /**< No Stereo packing required */ + NV_ENC_STEREO_PACKING_MODE_CHECKERBOARD = 0x1, /**< Checkerboard mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_COLINTERLEAVE = 0x2, /**< Column Interleave mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_ROWINTERLEAVE = 0x3, /**< Row Interleave mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_SIDEBYSIDE = 0x4, /**< Side-by-side mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_TOPBOTTOM = 0x5, /**< Top-Bottom mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_FRAMESEQ = 0x6 /**< Frame Sequential mode for packing stereo frames */ +} NV_ENC_STEREO_PACKING_MODE; + +/** + * Input Resource type + */ +typedef enum _NV_ENC_INPUT_RESOURCE_TYPE +{ + NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX = 0x0, /**< input resource type is a directx9 surface*/ + NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR = 0x1, /**< input resource type is a cuda device pointer surface*/ + NV_ENC_INPUT_RESOURCE_TYPE_CUDAARRAY = 0x2, /**< input resource type is a cuda array surface */ +} NV_ENC_INPUT_RESOURCE_TYPE; + +/** + * Encoder Device type + */ +typedef enum _NV_ENC_DEVICE_TYPE +{ + NV_ENC_DEVICE_TYPE_DIRECTX = 0x0, /**< encode device type is a directx9 device */ + NV_ENC_DEVICE_TYPE_CUDA = 0x1, /**< encode device type is a cuda device */ +} NV_ENC_DEVICE_TYPE; + +/** + * Encoder capabilities enumeration. + */ +typedef enum _NV_ENC_CAPS +{ + /** + * Maximum number of B-Frames supported. + */ + NV_ENC_CAPS_NUM_MAX_BFRAMES, + + /** + * Rate control modes supported. + * \n The API return value is a bitmask of the values in NV_ENC_PARAMS_RC_MODE. + */ + NV_ENC_CAPS_SUPPORTED_RATECONTROL_MODES, + + /** + * Indicates HW support for field mode encoding. + * \n 0 : Interlaced mode encoding is not supported. + * \n 1 : Interlaced field mode encoding is supported. + * \n 2 : Interlaced frame encoding and field mode encoding are both supported. + */ + NV_ENC_CAPS_SUPPORT_FIELD_ENCODING, + + /** + * Indicates HW support for monochrome mode encoding. + * \n 0 : Monochrome mode not supported. + * \n 1 : Monochrome mode supported. + */ + NV_ENC_CAPS_SUPPORT_MONOCHROME, + + /** + * Indicates HW support for FMO. + * \n 0 : FMO not supported. + * \n 1 : FMO supported. + */ + NV_ENC_CAPS_SUPPORT_FMO, + + /** + * Indicates HW capability for Quarter pel motion estimation. + * \n 0 : QuarterPel Motion Estimation not supported. + * \n 1 : QuarterPel Motion Estimation supported. + */ + NV_ENC_CAPS_SUPPORT_QPELMV, + + /** + * H.264 specific. Indicates HW support for BDirect modes. + * \n 0 : BDirect mode encoding not supported. + * \n 1 : BDirect mode encoding supported. + */ + NV_ENC_CAPS_SUPPORT_BDIRECT_MODE, + + /** + * H264 specific. Indicates HW support for CABAC entropy coding mode. + * \n 0 : CABAC entropy coding not supported. + * \n 1 : CABAC entropy coding supported. + */ + NV_ENC_CAPS_SUPPORT_CABAC, + + /** + * Indicates HW support for Adaptive Transform. + * \n 0 : Adaptive Transform not supported. + * \n 1 : Adaptive Transform supported. + */ + NV_ENC_CAPS_SUPPORT_ADAPTIVE_TRANSFORM, + + /** + * Reserved enum field. + */ + NV_ENC_CAPS_SUPPORT_RESERVED, + + /** + * Indicates HW support for encoding Temporal layers. + * \n 0 : Encoding Temporal layers not supported. + * \n 1 : Encoding Temporal layers supported. + */ + NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS, + + /** + * Indicates HW support for Hierarchical P frames. 
+ * \n 0 : Hierarchical P frames not supported. + * \n 1 : Hierarchical P frames supported. + */ + NV_ENC_CAPS_SUPPORT_HIERARCHICAL_PFRAMES, + + /** + * Indicates HW support for Hierarchical B frames. + * \n 0 : Hierarchical B frames not supported. + * \n 1 : Hierarchical B frames supported. + */ + NV_ENC_CAPS_SUPPORT_HIERARCHICAL_BFRAMES, + + /** + * Maximum Encoding level supported (See ::NV_ENC_LEVEL for details). + */ + NV_ENC_CAPS_LEVEL_MAX, + + /** + * Minimum Encoding level supported (See ::NV_ENC_LEVEL for details). + */ + NV_ENC_CAPS_LEVEL_MIN, + + /** + * Indicates HW support for separate colour plane encoding. + * \n 0 : Separate colour plane encoding not supported. + * \n 1 : Separate colour plane encoding supported. + */ + NV_ENC_CAPS_SEPARATE_COLOUR_PLANE, + + /** + * Maximum output width supported. + */ + NV_ENC_CAPS_WIDTH_MAX, + + /** + * Maximum output height supported. + */ + NV_ENC_CAPS_HEIGHT_MAX, + + /** + * Indicates Temporal Scalability Support. + * \n 0 : Temporal SVC encoding not supported. + * \n 1 : Temporal SVC encoding supported. + */ + NV_ENC_CAPS_SUPPORT_TEMPORAL_SVC, + + /** + * Indicates Dynamic Encode Resolution Change Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Dynamic Encode Resolution Change not supported. + * \n 1 : Dynamic Encode Resolution Change supported. + */ + NV_ENC_CAPS_SUPPORT_DYN_RES_CHANGE, + + /** + * Indicates Dynamic Encode Bitrate Change Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Dynamic Encode bitrate change not supported. + * \n 1 : Dynamic Encode bitrate change supported. + */ + NV_ENC_CAPS_SUPPORT_DYN_BITRATE_CHANGE, + + /** + * Indicates Forcing Constant QP On The Fly Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Forcing constant QP on the fly not supported. + * \n 1 : Forcing constant QP on the fly supported. + */ + NV_ENC_CAPS_SUPPORT_DYN_FORCE_CONSTQP, + + /** + * Indicates Dynamic rate control mode Change Support. + * \n 0 : Dynamic rate control mode change not supported. + * \n 1 : Dynamic rate control mode change supported. + */ + NV_ENC_CAPS_SUPPORT_DYN_RCMODE_CHANGE, + + /** + * Indicates Subframe readback support for slice-based encoding. + * \n 0 : Subframe readback not supported. + * \n 1 : Subframe readback supported. + */ + NV_ENC_CAPS_SUPPORT_SUBFRAME_READBACK, + + /** + * Indicates Constrained Encoding mode support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Constrained encoding mode not supported. + * \n 1 : Constarined encoding mode supported. + * If this mode is supported client can enable this during initialisation. + * Client can then force a picture to be coded as constrained picture where + * each slice in a constrained picture will have constrained_intra_pred_flag set to 1 + * and disable_deblocking_filter_idc will be set to 2 and prediction vectors for inter + * macroblocks in each slice will be restricted to the slice region. + */ + NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING, + + /** + * Indicates Intra Refresh Mode Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Intra Refresh Mode not supported. + * \n 1 : Intra Refresh Mode supported. + */ + NV_ENC_CAPS_SUPPORT_INTRA_REFRESH, + + /** + * Indicates Custom VBV Bufer Size support. It can be used for capping frame size. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Custom VBV buffer size specification from client, not supported. + * \n 1 : Custom VBV buffer size specification from client, supported. 
+ */ + NV_ENC_CAPS_SUPPORT_CUSTOM_VBV_BUF_SIZE, + + /** + * Indicates Dynamic Slice Mode Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Dynamic Slice Mode not supported. + * \n 1 : Dynamic Slice Mode supported. + */ + NV_ENC_CAPS_SUPPORT_DYNAMIC_SLICE_MODE, + + /** + * Indicates Reference Picture Invalidation Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Reference Picture Invalidation not supported. + * \n 1 : Reference Picture Invalidation supported. + */ + NV_ENC_CAPS_SUPPORT_REF_PIC_INVALIDATION, + + /** + * Indicates support for PreProcessing. + * The API return value is a bitmask of the values defined in ::NV_ENC_PREPROC_FLAGS + */ + NV_ENC_CAPS_PREPROC_SUPPORT, + + /** + * Indicates support Async mode. + * \n 0 : Async Encode mode not supported. + * \n 1 : Async Encode mode supported. + */ + NV_ENC_CAPS_ASYNC_ENCODE_SUPPORT, + + /** + * Maximum MBs per frame supported. + */ + NV_ENC_CAPS_MB_NUM_MAX, + + /** + * Maximum aggregate throughput in MBs per sec. + */ + NV_ENC_CAPS_MB_PER_SEC_MAX, + + /** + * Indicates HW support for YUV444 mode encoding. + * \n 0 : YUV444 mode encoding not supported. + * \n 1 : YUV444 mode encoding supported. + */ + NV_ENC_CAPS_SUPPORT_YUV444_ENCODE, + + /** + * Indicates HW support for lossless encoding. + * \n 0 : lossless encoding not supported. + * \n 1 : lossless encoding supported. + */ + NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE, + + /** + * Indicates HW support for Sample Adaptive Offset. + * \n 0 : SAO not supported. + * \n 1 : SAO encoding supported. + */ + NV_ENC_CAPS_SUPPORT_SAO, + + /** + * Indicates HW support for MEOnly Mode. + * \n 0 : MEOnly Mode not supported. + * \n 1 : MEOnly Mode supported. + */ + NV_ENC_CAPS_SUPPORT_MEONLY_MODE, + + /** + * Indicates HW support for lookahead encoding (enableLookahead=1). + * \n 0 : Lookahead not supported. + * \n 1 : Lookahead supported. + */ + NV_ENC_CAPS_SUPPORT_LOOKAHEAD, + + /** + * Indicates HW support for temporal AQ encoding (enableTemporalAQ=1). + * \n 0 : Temporal AQ not supported. + * \n 1 : Temporal AQ supported. + */ + NV_ENC_CAPS_SUPPORT_TEMPORAL_AQ, + /** + * Indicates HW support for 10 bit encoding. + * \n 0 : 10 bit encoding not supported. + * \n 1 : 10 bit encoding supported. + */ + NV_ENC_CAPS_SUPPORT_10BIT_ENCODE, + + /** + * Reserved - Not to be used by clients. + */ + NV_ENC_CAPS_EXPOSED_COUNT +} NV_ENC_CAPS; + +/** + * HEVC CU SIZE + */ +typedef enum _NV_ENC_HEVC_CUSIZE +{ + NV_ENC_HEVC_CUSIZE_AUTOSELECT = 0, + NV_ENC_HEVC_CUSIZE_8x8 = 1, + NV_ENC_HEVC_CUSIZE_16x16 = 2, + NV_ENC_HEVC_CUSIZE_32x32 = 3, + NV_ENC_HEVC_CUSIZE_64x64 = 4, +}NV_ENC_HEVC_CUSIZE; + +/** + * Input struct for querying Encoding capabilities. + */ +typedef struct _NV_ENC_CAPS_PARAM +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CAPS_PARAM_VER */ + NV_ENC_CAPS capsToQuery; /**< [in]: Specifies the encode capability to be queried. Client should pass a member for ::NV_ENC_CAPS enum. */ + uint32_t reserved[62]; /**< [in]: Reserved and must be set to 0 */ +} NV_ENC_CAPS_PARAM; + +/** NV_ENC_CAPS_PARAM struct version. */ +#define NV_ENC_CAPS_PARAM_VER NVENCAPI_STRUCT_VERSION(1) + + +/** + * Creation parameters for input buffer. + */ +typedef struct _NV_ENC_CREATE_INPUT_BUFFER +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CREATE_INPUT_BUFFER_VER */ + uint32_t width; /**< [in]: Input buffer width */ + uint32_t height; /**< [in]: Input buffer width */ + NV_ENC_MEMORY_HEAP memoryHeap; /**< [in]: Deprecated. 
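Entries of the ::NV_ENC_CAPS enumeration above are queried one at a time through the NV_ENC_CAPS_PARAM struct. A short sketch, under the same fn/enc assumptions as the earlier example; NV_ENC_CODEC_H264_GUID and the nvEncGetEncodeCaps entry point live elsewhere in the header and are not part of this hunk.

#include "nvEncodeAPI.h"

/* Sketch: ask the driver how many consecutive B frames it supports for H.264. */
static int query_max_bframes(NV_ENCODE_API_FUNCTION_LIST *fn, void *enc)
{
    NV_ENC_CAPS_PARAM param = { 0 };
    int value = 0;

    param.version     = NV_ENC_CAPS_PARAM_VER;
    param.capsToQuery = NV_ENC_CAPS_NUM_MAX_BFRAMES;

    if (fn->nvEncGetEncodeCaps(enc, NV_ENC_CODEC_H264_GUID,
                               &param, &value) != NV_ENC_SUCCESS)
        return -1;      /* query failed */
    return value;       /* 0 means B frames are not supported at all */
}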
Will be removed in sdk 8.0 */ + NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Input buffer format */ + uint32_t reserved; /**< [in]: Reserved and must be set to 0 */ + NV_ENC_INPUT_PTR inputBuffer; /**< [out]: Pointer to input buffer */ + void* pSysMemBuffer; /**< [in]: Pointer to existing sysmem buffer */ + uint32_t reserved1[57]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[63]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_CREATE_INPUT_BUFFER; + +/** NV_ENC_CREATE_INPUT_BUFFER struct version. */ +#define NV_ENC_CREATE_INPUT_BUFFER_VER NVENCAPI_STRUCT_VERSION(1) + +/** + * Creation parameters for output bitstream buffer. + */ +typedef struct _NV_ENC_CREATE_BITSTREAM_BUFFER +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CREATE_BITSTREAM_BUFFER_VER */ + uint32_t size; /**< [in]: Size of the bitstream buffer to be created */ + NV_ENC_MEMORY_HEAP memoryHeap; /**< [in]: Deprecated. Will be removed in sdk 8.0 */ + uint32_t reserved; /**< [in]: Reserved and must be set to 0 */ + NV_ENC_OUTPUT_PTR bitstreamBuffer; /**< [out]: Pointer to the output bitstream buffer */ + void* bitstreamBufferPtr; /**< [out]: Reserved and should not be used */ + uint32_t reserved1[58]; /**< [in]: Reserved and should be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and should be set to NULL */ +} NV_ENC_CREATE_BITSTREAM_BUFFER; + +/** NV_ENC_CREATE_BITSTREAM_BUFFER struct version. */ +#define NV_ENC_CREATE_BITSTREAM_BUFFER_VER NVENCAPI_STRUCT_VERSION(1) + +/** + * Structs needed for ME only mode. + */ +typedef struct _NV_ENC_MVECTOR +{ + int16_t mvx; /**< the x component of MV in qpel units */ + int16_t mvy; /**< the y component of MV in qpel units */ +} NV_ENC_MVECTOR; + +/** + * Motion vector structure per macroblock for H264 motion estimation. + */ +typedef struct _NV_ENC_H264_MV_DATA +{ + NV_ENC_MVECTOR mv[4]; /**< up to 4 vectors for 8x8 partition */ + uint8_t mbType; /**< 0 (I), 1 (P), 2 (IPCM), 3 (B) */ + uint8_t partitionType; /**< Specifies the block partition type. 0:16x16, 1:8x8, 2:16x8, 3:8x16 */ + uint16_t reserved; /**< reserved padding for alignment */ + uint32_t mbCost; +} NV_ENC_H264_MV_DATA; + +/** + * Motion vector structure per CU for HEVC motion estimation. + */ +typedef struct _NV_ENC_HEVC_MV_DATA +{ + NV_ENC_MVECTOR mv[4]; /**< up to 4 vectors within a CU */ + uint8_t cuType; /**< 0 (I), 1(P), 2 (Skip) */ + uint8_t cuSize; /**< 0: 8x8, 1: 16x16, 2: 32x32, 3: 64x64 */ + uint8_t partitionMode; /**< The CU partition mode + 0 (2Nx2N), 1 (2NxN), 2(Nx2N), 3 (NxN), + 4 (2NxnU), 5 (2NxnD), 6(nLx2N), 7 (nRx2N) */ + uint8_t lastCUInCTB; /**< Marker to separate CUs in the current CTB from CUs in the next CTB */ +} NV_ENC_HEVC_MV_DATA; + +/** + * Creation parameters for output motion vector buffer for ME only mode. + */ +typedef struct _NV_ENC_CREATE_MV_BUFFER +{ + uint32_t version; /**< [in]: Struct version. 
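NV_ENC_CREATE_BITSTREAM_BUFFER above is an in/out struct: the client supplies only the version and the size, and the driver returns the opaque bitstreamBuffer handle. A sketch, again assuming fn and enc from the earlier examples and treating the size bound as purely illustrative.

#include "nvEncodeAPI.h"

/* Sketch: allocate one driver-owned output buffer for the session. */
static NV_ENC_OUTPUT_PTR alloc_bitstream(NV_ENCODE_API_FUNCTION_LIST *fn,
                                         void *enc, uint32_t size)
{
    NV_ENC_CREATE_BITSTREAM_BUFFER cb = { 0 };  /* memoryHeap 0 = autoselect (deprecated field) */

    cb.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
    cb.size    = size;                          /* e.g. a generous width*height*2 bound */

    if (fn->nvEncCreateBitstreamBuffer(enc, &cb) != NV_ENC_SUCCESS)
        return NULL;
    return cb.bitstreamBuffer;  /* later passed as NV_ENC_PIC_PARAMS::outputBitstream */
}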
Must be set to NV_ENC_CREATE_MV_BUFFER_VER */ + NV_ENC_OUTPUT_PTR mvBuffer; /**< [out]: Pointer to the output motion vector buffer */ + uint32_t reserved1[255]; /**< [in]: Reserved and should be set to 0 */ + void* reserved2[63]; /**< [in]: Reserved and should be set to NULL */ +} NV_ENC_CREATE_MV_BUFFER; + +/** NV_ENC_CREATE_MV_BUFFER struct version*/ +#define NV_ENC_CREATE_MV_BUFFER_VER NVENCAPI_STRUCT_VERSION(1) + +/** + * QP value for frames + */ +typedef struct _NV_ENC_QP +{ + uint32_t qpInterP; + uint32_t qpInterB; + uint32_t qpIntra; +} NV_ENC_QP; + +/** + * Rate Control Configuration Paramters + */ + typedef struct _NV_ENC_RC_PARAMS + { + uint32_t version; + NV_ENC_PARAMS_RC_MODE rateControlMode; /**< [in]: Specifies the rate control mode. Check support for various rate control modes using ::NV_ENC_CAPS_SUPPORTED_RATECONTROL_MODES caps. */ + NV_ENC_QP constQP; /**< [in]: Specifies the initial QP to be used for encoding, these values would be used for all frames if in Constant QP mode. */ + uint32_t averageBitRate; /**< [in]: Specifies the average bitrate(in bits/sec) used for encoding. */ + uint32_t maxBitRate; /**< [in]: Specifies the maximum bitrate for the encoded output. This is used for VBR and ignored for CBR mode. */ + uint32_t vbvBufferSize; /**< [in]: Specifies the VBV(HRD) buffer size. in bits. Set 0 to use the default VBV buffer size. */ + uint32_t vbvInitialDelay; /**< [in]: Specifies the VBV(HRD) initial delay in bits. Set 0 to use the default VBV initial delay .*/ + uint32_t enableMinQP :1; /**< [in]: Set this to 1 if minimum QP used for rate control. */ + uint32_t enableMaxQP :1; /**< [in]: Set this to 1 if maximum QP used for rate control. */ + uint32_t enableInitialRCQP :1; /**< [in]: Set this to 1 if user suppplied initial QP is used for rate control. */ + uint32_t enableAQ :1; /**< [in]: Set this to 1 to enable adaptive quantization (Spatial). */ + uint32_t enableExtQPDeltaMap :1; /**< [in]: Set this to 1 to enable additional QP modifier for each MB supplied by client though signed byte array pointed to by NV_ENC_PIC_PARAMS::qpDeltaMap (Not Supported when AQ(Spatial/Temporal) is enabled) */ + uint32_t enableLookahead :1; /**< [in]: Set this to 1 to enable lookahead with depth <lookaheadDepth> (if lookahead is enabled, input frames must remain available to the encoder until encode completion) */ + uint32_t disableIadapt :1; /**< [in]: Set this to 1 to disable adaptive I-frame insertion at scene cuts (only has an effect when lookahead is enabled) */ + uint32_t disableBadapt :1; /**< [in]: Set this to 1 to disable adaptive B-frame decision (only has an effect when lookahead is enabled) */ + uint32_t enableTemporalAQ :1; /**< [in]: Set this to 1 to enable temporal AQ for H.264 */ + uint32_t zeroReorderDelay :1; /**< [in]: Set this to 1 to indicate zero latency operation (no reordering delay, num_reorder_frames=0) */ + uint32_t enableNonRefP :1; /**< [in]: Set this to 1 to enable automatic insertion of non-reference P-frames (no effect if enablePTD=0) */ + uint32_t strictGOPTarget :1; /**< [in]: Set this to 1 to minimize GOP-to-GOP rate fluctuations */ + uint32_t aqStrength :4; /**< [in]: When AQ (Spatial) is enabled (i.e. NV_ENC_RC_PARAMS::enableAQ is set), this field is used to specify AQ strength. AQ strength scale is from 1 (low) - 15 (aggressive). If not set, strength is autoselected by driver. 
Currently supported only with h264 */ + uint32_t reservedBitFields :16; /**< [in]: Reserved bitfields and must be set to 0 */ + NV_ENC_QP minQP; /**< [in]: Specifies the minimum QP used for rate control. Client must set NV_ENC_CONFIG::enableMinQP to 1. */ + NV_ENC_QP maxQP; /**< [in]: Specifies the maximum QP used for rate control. Client must set NV_ENC_CONFIG::enableMaxQP to 1. */ + NV_ENC_QP initialRCQP; /**< [in]: Specifies the initial QP used for rate control. Client must set NV_ENC_CONFIG::enableInitialRCQP to 1. */ + uint32_t temporallayerIdxMask; /**< [in]: Specifies the temporal layers (as a bitmask) whose QPs have changed. Valid max bitmask is [2^NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS - 1] */ + uint8_t temporalLayerQP[8]; /**< [in]: Specifies the temporal layer QPs used for rate control. Temporal layer index is used as as the array index */ + uint16_t targetQuality; /**< [in]: Target CQ (Constant Quality) level for VBR mode (range 0-51 with 0-automatic) */ + uint16_t lookaheadDepth; /**< [in]: Maximum depth of lookahead with range 0-32 (only used if enableLookahead=1) */ + uint32_t reserved[9]; + } NV_ENC_RC_PARAMS; + +/** macro for constructing the version field of ::_NV_ENC_RC_PARAMS */ +#define NV_ENC_RC_PARAMS_VER NVENCAPI_STRUCT_VERSION(1) + + + +/** + * \struct _NV_ENC_CONFIG_H264_VUI_PARAMETERS + * H264 Video Usability Info parameters + */ +typedef struct _NV_ENC_CONFIG_H264_VUI_PARAMETERS +{ + uint32_t overscanInfoPresentFlag; /**< [in]: if set to 1 , it specifies that the overscanInfo is present */ + uint32_t overscanInfo; /**< [in]: Specifies the overscan info(as defined in Annex E of the ITU-T Specification). */ + uint32_t videoSignalTypePresentFlag; /**< [in]: If set to 1, it specifies that the videoFormat, videoFullRangeFlag and colourDescriptionPresentFlag are present. */ + uint32_t videoFormat; /**< [in]: Specifies the source video format(as defined in Annex E of the ITU-T Specification).*/ + uint32_t videoFullRangeFlag; /**< [in]: Specifies the output range of the luma and chroma samples(as defined in Annex E of the ITU-T Specification). */ + uint32_t colourDescriptionPresentFlag; /**< [in]: If set to 1, it specifies that the colourPrimaries, transferCharacteristics and colourMatrix are present. */ + uint32_t colourPrimaries; /**< [in]: Specifies color primaries for converting to RGB(as defined in Annex E of the ITU-T Specification) */ + uint32_t transferCharacteristics; /**< [in]: Specifies the opto-electronic transfer characteristics to use (as defined in Annex E of the ITU-T Specification) */ + uint32_t colourMatrix; /**< [in]: Specifies the matrix coefficients used in deriving the luma and chroma from the RGB primaries (as defined in Annex E of the ITU-T Specification). 
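Every optional control in NV_ENC_RC_PARAMS above is paired with an enable bit, so a client normally zero-fills the struct and switches on only what it needs. The following is a minimal sketch of a VBR configuration with lookahead and spatial AQ; the bitrate numbers are arbitrary and NV_ENC_PARAMS_RC_VBR is one of the rate control enumerators defined earlier in the header.

#include <string.h>
#include "nvEncodeAPI.h"

/* Sketch: VBR at ~4 Mb/s with a 20-frame lookahead and spatial AQ. */
static void setup_rc(NV_ENC_RC_PARAMS *rc)
{
    memset(rc, 0, sizeof(*rc));
    rc->version         = NV_ENC_RC_PARAMS_VER;
    rc->rateControlMode = NV_ENC_PARAMS_RC_VBR;
    rc->averageBitRate  = 4000000;   /* bits per second */
    rc->maxBitRate      = 6000000;   /* VBR peak */
    rc->enableLookahead = 1;         /* check NV_ENC_CAPS_SUPPORT_LOOKAHEAD first */
    rc->lookaheadDepth  = 20;        /* valid range 0-32 */
    rc->enableAQ        = 1;         /* spatial adaptive quantization */
    rc->aqStrength      = 8;         /* 1 (weak) .. 15 (aggressive), 0 = auto */
}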
*/ + uint32_t chromaSampleLocationFlag; /**< [in]: if set to 1 , it specifies that the chromaSampleLocationTop and chromaSampleLocationBot are present.*/ + uint32_t chromaSampleLocationTop; /**< [in]: Specifies the chroma sample location for top field(as defined in Annex E of the ITU-T Specification) */ + uint32_t chromaSampleLocationBot; /**< [in]: Specifies the chroma sample location for bottom field(as defined in Annex E of the ITU-T Specification) */ + uint32_t bitstreamRestrictionFlag; /**< [in]: if set to 1, it specifies the bitstream restriction parameters are present in the bitstream.*/ + uint32_t reserved[15]; +}NV_ENC_CONFIG_H264_VUI_PARAMETERS; + +typedef NV_ENC_CONFIG_H264_VUI_PARAMETERS NV_ENC_CONFIG_HEVC_VUI_PARAMETERS; + +/** + * \struct _NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE + * External motion vector hint counts per block type. + */ +typedef struct _NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE +{ + uint32_t numCandsPerBlk16x16 : 4; /**< [in]: Specifies the number of candidates per 16x16 block. */ + uint32_t numCandsPerBlk16x8 : 4; /**< [in]: Specifies the number of candidates per 16x8 block. */ + uint32_t numCandsPerBlk8x16 : 4; /**< [in]: Specifies the number of candidates per 8x16 block. */ + uint32_t numCandsPerBlk8x8 : 4; /**< [in]: Specifies the number of candidates per 8x8 block. */ + uint32_t reserved : 16; /**< [in]: Reserved for padding. */ + uint32_t reserved1[3]; /**< [in]: Reserved for future use. */ +} NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE; + + +/** + * \struct _NVENC_EXTERNAL_ME_HINT + * External Motion Vector hint structure. + */ +typedef struct _NVENC_EXTERNAL_ME_HINT +{ + int32_t mvx : 12; /**< [in]: Specifies the x component of integer pixel MV (relative to current MB) S12.0. */ + int32_t mvy : 10; /**< [in]: Specifies the y component of integer pixel MV (relative to current MB) S10.0 .*/ + int32_t refidx : 5; /**< [in]: Specifies the reference index (31=invalid). Current we support only 1 reference frame per direction for external hints, so \p refidx must be 0. */ + int32_t dir : 1; /**< [in]: Specifies the direction of motion estimation . 0=L0 1=L1.*/ + int32_t partType : 2; /**< [in]: Specifies the block partition type.0=16x16 1=16x8 2=8x16 3=8x8 (blocks in partition must be consecutive).*/ + int32_t lastofPart : 1; /**< [in]: Set to 1 for the last MV of (sub) partition */ + int32_t lastOfMB : 1; /**< [in]: Set to 1 for the last MV of macroblock. 
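The candidate counts above determine how many NVENC_EXTERNAL_ME_HINT entries have to be allocated: one slot per 16x16 candidate, two per 16x8 and 8x16 candidate, and four per 8x8 candidate (matching how many blocks of each size fit in a macroblock), per macroblock and per prediction direction. A small sketch of that arithmetic; the helper name is ours, not part of the API.

#include <stdint.h>
#include "nvEncodeAPI.h"

/* Sketch: hint entries needed per macroblock, per direction. */
static uint32_t hints_per_mb(const NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE *c)
{
    return 1 * c->numCandsPerBlk16x16 +
           2 * c->numCandsPerBlk16x8  +
           2 * c->numCandsPerBlk8x16  +
           4 * c->numCandsPerBlk8x8;
    /* total meExternalHints entries = hints_per_mb() * picWidthInMbs *
     * picHeightInMbs, summed over L0 and L1 for bidirectional hints */
}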
*/ +} NVENC_EXTERNAL_ME_HINT; + + +/** + * \struct _NV_ENC_CONFIG_H264 + * H264 encoder configuration parameters + */ +typedef struct _NV_ENC_CONFIG_H264 +{ + uint32_t enableTemporalSVC :1; /**< [in]: Set to 1 to enable SVC temporal*/ + uint32_t enableStereoMVC :1; /**< [in]: Set to 1 to enable stereo MVC*/ + uint32_t hierarchicalPFrames :1; /**< [in]: Set to 1 to enable hierarchical PFrames */ + uint32_t hierarchicalBFrames :1; /**< [in]: Set to 1 to enable hierarchical BFrames */ + uint32_t outputBufferingPeriodSEI :1; /**< [in]: Set to 1 to write SEI buffering period syntax in the bitstream */ + uint32_t outputPictureTimingSEI :1; /**< [in]: Set to 1 to write SEI picture timing syntax in the bitstream */ + uint32_t outputAUD :1; /**< [in]: Set to 1 to write access unit delimiter syntax in bitstream */ + uint32_t disableSPSPPS :1; /**< [in]: Set to 1 to disable writing of Sequence and Picture parameter info in bitstream */ + uint32_t outputFramePackingSEI :1; /**< [in]: Set to 1 to enable writing of frame packing arrangement SEI messages to bitstream */ + uint32_t outputRecoveryPointSEI :1; /**< [in]: Set to 1 to enable writing of recovery point SEI message */ + uint32_t enableIntraRefresh :1; /**< [in]: Set to 1 to enable gradual decoder refresh or intra refresh. If the GOP structure uses B frames this will be ignored */ + uint32_t enableConstrainedEncoding :1; /**< [in]: Set this to 1 to enable constrainedFrame encoding where each slice in the constarined picture is independent of other slices + Check support for constrained encoding using ::NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING caps. */ + uint32_t repeatSPSPPS :1; /**< [in]: Set to 1 to enable writing of Sequence and Picture parameter for every IDR frame */ + uint32_t enableVFR :1; /**< [in]: Set to 1 to enable variable frame rate. */ + uint32_t enableLTR :1; /**< [in]: Currently this feature is not available and must be set to 0. Set to 1 to enable LTR support and auto-mark the first */ + uint32_t qpPrimeYZeroTransformBypassFlag :1; /**< [in]: To enable lossless encode set this to 1, set QP to 0 and RC_mode to NV_ENC_PARAMS_RC_CONSTQP and profile to HIGH_444_PREDICTIVE_PROFILE. + Check support for lossless encoding using ::NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE caps. */ + uint32_t useConstrainedIntraPred :1; /**< [in]: Set 1 to enable constrained intra prediction. */ + uint32_t reservedBitFields :15; /**< [in]: Reserved bitfields and must be set to 0 */ + uint32_t level; /**< [in]: Specifies the encoding level. Client is recommended to set this to NV_ENC_LEVEL_AUTOSELECT in order to enable the NvEncodeAPI interface to select the correct level. */ + uint32_t idrPeriod; /**< [in]: Specifies the IDR interval. If not set, this is made equal to gopLength in NV_ENC_CONFIG.Low latency application client can set IDR interval to NVENC_INFINITE_GOPLENGTH so that IDR frames are not inserted automatically. */ + uint32_t separateColourPlaneFlag; /**< [in]: Set to 1 to enable 4:4:4 separate colour planes */ + uint32_t disableDeblockingFilterIDC; /**< [in]: Specifies the deblocking filter mode. Permissible value range: [0,2] */ + uint32_t numTemporalLayers; /**< [in]: Specifies max temporal layers to be used for hierarchical coding. Valid value range is [1,::NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS] */ + uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header. Currently reserved and must be set to 0. */ + uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header. Currently reserved and must be set to 0. 
*/ + NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE adaptiveTransformMode; /**< [in]: Specifies the AdaptiveTransform Mode. Check support for AdaptiveTransform mode using ::NV_ENC_CAPS_SUPPORT_ADAPTIVE_TRANSFORM caps. */ + NV_ENC_H264_FMO_MODE fmoMode; /**< [in]: Specified the FMO Mode. Check support for FMO using ::NV_ENC_CAPS_SUPPORT_FMO caps. */ + NV_ENC_H264_BDIRECT_MODE bdirectMode; /**< [in]: Specifies the BDirect mode. Check support for BDirect mode using ::NV_ENC_CAPS_SUPPORT_BDIRECT_MODE caps.*/ + NV_ENC_H264_ENTROPY_CODING_MODE entropyCodingMode; /**< [in]: Specifies the entropy coding mode. Check support for CABAC mode using ::NV_ENC_CAPS_SUPPORT_CABAC caps. */ + NV_ENC_STEREO_PACKING_MODE stereoMode; /**< [in]: Specifies the stereo frame packing mode which is to be signalled in frame packing arrangement SEI */ + uint32_t intraRefreshPeriod; /**< [in]: Specifies the interval between successive intra refresh if enableIntrarefresh is set. Requires enableIntraRefresh to be set. + Will be disabled if NV_ENC_CONFIG::gopLength is not set to NVENC_INFINITE_GOPLENGTH. */ + uint32_t intraRefreshCnt; /**< [in]: Specifies the length of intra refresh in number of frames for periodic intra refresh. This value should be smaller than intraRefreshPeriod */ + uint32_t maxNumRefFrames; /**< [in]: Specifies the DPB size used for encoding. Setting it to 0 will let driver use the default dpb size. + The low latency application which wants to invalidate reference frame as an error resilience tool + is recommended to use a large DPB size so that the encoder can keep old reference frames which can be used if recent + frames are invalidated. */ + uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices + sliceMode = 0 MB based slices, sliceMode = 1 Byte based slices, sliceMode = 2 MB row based slices, sliceMode = 3, numSlices in Picture + When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting + When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */ + uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For: + sliceMode = 0, sliceModeData specifies # of MBs in each slice (except last slice) + sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice) + sliceMode = 2, sliceModeData specifies # of MB rows in each slice (except last slice) + sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */ + NV_ENC_CONFIG_H264_VUI_PARAMETERS h264VUIParameters; /**< [in]: Specifies the H264 video usability info pamameters */ + uint32_t ltrNumFrames; /**< [in]: Specifies the number of LTR frames used. + If ltrTrustMode=1, encoder will mark first numLTRFrames base layer reference frames within each IDR interval as LTR. + If ltrMarkFrame=1, ltrNumFrames specifies maximum number of ltr frames in DPB. + If ltrNumFrames value is more that DPB size(maxNumRefFrames) encoder will take decision on its own. */ + uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode. + Set to 0 to disallow encoding using LTR frames until later specified. + Set to 1 to allow encoding using LTR frames unless later invalidated.*/ + uint32_t chromaFormatIDC; /**< [in]: Specifies the chroma format. Should be set to 1 for yuv420 input, 3 for yuv444 input. 
+ Check support for YUV444 encoding using ::NV_ENC_CAPS_SUPPORT_YUV444_ENCODE caps.*/ + uint32_t maxTemporalLayers; /**< [in]: Specifies the max temporal layer used for hierarchical coding. */ + uint32_t reserved1[270]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_CONFIG_H264; + + +/** + * \struct _NV_ENC_CONFIG_HEVC + * HEVC encoder configuration parameters to be set during initialization. + */ +typedef struct _NV_ENC_CONFIG_HEVC +{ + uint32_t level; /**< [in]: Specifies the level of the encoded bitstream.*/ + uint32_t tier; /**< [in]: Specifies the level tier of the encoded bitstream.*/ + NV_ENC_HEVC_CUSIZE minCUSize; /**< [in]: Specifies the minimum size of luma coding unit.*/ + NV_ENC_HEVC_CUSIZE maxCUSize; /**< [in]: Specifies the maximum size of luma coding unit. Currently NVENC SDK only supports maxCUSize equal to NV_ENC_HEVC_CUSIZE_32x32.*/ + uint32_t useConstrainedIntraPred :1; /**< [in]: Set 1 to enable constrained intra prediction. */ + uint32_t disableDeblockAcrossSliceBoundary :1; /**< [in]: Set 1 to disable in loop filtering across slice boundary.*/ + uint32_t outputBufferingPeriodSEI :1; /**< [in]: Set 1 to write SEI buffering period syntax in the bitstream */ + uint32_t outputPictureTimingSEI :1; /**< [in]: Set 1 to write SEI picture timing syntax in the bitstream */ + uint32_t outputAUD :1; /**< [in]: Set 1 to write Access Unit Delimiter syntax. */ + uint32_t enableLTR :1; /**< [in]: Set 1 to enable use of long term reference pictures for inter prediction. */ + uint32_t disableSPSPPS :1; /**< [in]: Set 1 to disable VPS,SPS and PPS signalling in the bitstream. */ + uint32_t repeatSPSPPS :1; /**< [in]: Set 1 to output VPS,SPS and PPS for every IDR frame.*/ + uint32_t enableIntraRefresh :1; /**< [in]: Set 1 to enable gradual decoder refresh or intra refresh. If the GOP structure uses B frames this will be ignored */ + uint32_t chromaFormatIDC :2; /**< [in]: Specifies the chroma format. Should be set to 1 for yuv420 input, 3 for yuv444 input.*/ + uint32_t pixelBitDepthMinus8 :3; /**< [in]: Specifies pixel bit depth minus 8. Should be set to 0 for 8 bit input, 2 for 10 bit input.*/ + uint32_t reserved :18; /**< [in]: Reserved bitfields.*/ + uint32_t idrPeriod; /**< [in]: Specifies the IDR interval. If not set, this is made equal to gopLength in NV_ENC_CONFIG.Low latency application client can set IDR interval to NVENC_INFINITE_GOPLENGTH so that IDR frames are not inserted automatically. */ + uint32_t intraRefreshPeriod; /**< [in]: Specifies the interval between successive intra refresh if enableIntrarefresh is set. Requires enableIntraRefresh to be set. + Will be disabled if NV_ENC_CONFIG::gopLength is not set to NVENC_INFINITE_GOPLENGTH. */ + uint32_t intraRefreshCnt; /**< [in]: Specifies the length of intra refresh in number of frames for periodic intra refresh. This value should be smaller than intraRefreshPeriod */ + uint32_t maxNumRefFramesInDPB; /**< [in]: Specifies the maximum number of references frames in the DPB.*/ + uint32_t ltrNumFrames; /**< [in]: Specifies the number of LTR frames used. + If ltrTrustMode=1, encoder will mark first numLTRFrames base layer reference frames within each IDR interval as LTR. + If ltrMarkFrame=1, ltrNumFrames specifies maximum number of ltr frames in DPB. + If ltrNumFrames value is more that DPB size(maxNumRefFramesInDPB) encoder will take decision on its own. */ + uint32_t vpsId; /**< [in]: Specifies the VPS id of the video parameter set. 
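Both codec configs share the sliceMode/sliceModeData convention described above: the mode picks the unit (MBs or CTUs, bytes, rows, or an absolute slice count) and the data field carries the number. A sketch for the H.264 case, operating on a config that would normally come from a preset as in a later example; the slice count and refresh periods are arbitrary.

#include "nvEncodeAPI.h"

/* Sketch: four slices per picture plus periodic intra refresh. */
static void setup_h264_slices(NV_ENC_CONFIG_H264 *h264)
{
    h264->sliceMode          = 3;    /* 3 = sliceModeData is the slice count    */
    h264->sliceModeData      = 4;    /* four slices, split optimally by driver  */

    h264->enableIntraRefresh = 1;    /* needs NV_ENC_CAPS_SUPPORT_INTRA_REFRESH */
    h264->intraRefreshPeriod = 120;  /* frames between refresh waves            */
    h264->intraRefreshCnt    = 15;   /* frames one refresh wave lasts           */
    /* NV_ENC_CONFIG::gopLength must be NVENC_INFINITE_GOPLENGTH, otherwise
     * the driver disables intra refresh again. */
}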
Currently reserved and must be set to 0. */ + uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header. Currently reserved and must be set to 0. */ + uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header. Currently reserved and must be set to 0. */ + uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices + sliceMode = 0 CTU based slices, sliceMode = 1 Byte based slices, sliceMode = 2 CTU row based slices, sliceMode = 3, numSlices in Picture + When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */ + uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For: + sliceMode = 0, sliceModeData specifies # of CTUs in each slice (except last slice) + sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice) + sliceMode = 2, sliceModeData specifies # of CTU rows in each slice (except last slice) + sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */ + uint32_t maxTemporalLayersMinus1; /**< [in]: Specifies the max temporal layer used for hierarchical coding. */ + NV_ENC_CONFIG_HEVC_VUI_PARAMETERS hevcVUIParameters; /**< [in]: Specifies the HEVC video usability info pamameters */ + uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode. + Set to 0 to disallow encoding using LTR frames until later specified. + Set to 1 to allow encoding using LTR frames unless later invalidated.*/ + uint32_t reserved1[217]; /**< [in]: Reserved and must be set to 0.*/ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_CONFIG_HEVC; + +/** + * \struct _NV_ENC_CODEC_CONFIG + * Codec-specific encoder configuration parameters to be set during initialization. + */ +typedef union _NV_ENC_CODEC_CONFIG +{ + NV_ENC_CONFIG_H264 h264Config; /**< [in]: Specifies the H.264-specific encoder configuration. */ + NV_ENC_CONFIG_HEVC hevcConfig; /**< [in]: Specifies the HEVC-specific encoder configuration. */ + uint32_t reserved[256]; /**< [in]: Reserved and must be set to 0 */ +} NV_ENC_CODEC_CONFIG; + + +/** + * \struct _NV_ENC_CONFIG + * Encoder configuration parameters to be set during initialization. + */ +typedef struct _NV_ENC_CONFIG +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CONFIG_VER. */ + GUID profileGUID; /**< [in]: Specifies the codec profile guid. If client specifies \p NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID the NvEncodeAPI interface will select the appropriate codec profile. */ + uint32_t gopLength; /**< [in]: Specifies the number of pictures in one GOP. Low latency application client can set goplength to NVENC_INFINITE_GOPLENGTH so that keyframes are not inserted automatically. */ + int32_t frameIntervalP; /**< [in]: Specifies the GOP pattern as follows: \p frameIntervalP = 0: I, 1: IPP, 2: IBP, 3: IBBP If goplength is set to NVENC_INFINITE_GOPLENGTH \p frameIntervalP should be set to 1. */ + uint32_t monoChromeEncoding; /**< [in]: Set this to 1 to enable monochrome encoding for this session. */ + NV_ENC_PARAMS_FRAME_FIELD_MODE frameFieldMode; /**< [in]: Specifies the frame/field mode. + Check support for field encoding using ::NV_ENC_CAPS_SUPPORT_FIELD_ENCODING caps. + Using a frameFieldMode other than NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME for RGB input is not supported. */ + NV_ENC_MV_PRECISION mvPrecision; /**< [in]: Specifies the desired motion vector prediction precision. 
*/ + NV_ENC_RC_PARAMS rcParams; /**< [in]: Specifies the rate control parameters for the current encoding session. */ + NV_ENC_CODEC_CONFIG encodeCodecConfig; /**< [in]: Specifies the codec specific config parameters through this union. */ + uint32_t reserved [278]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_CONFIG; + +/** macro for constructing the version field of ::_NV_ENC_CONFIG */ +#define NV_ENC_CONFIG_VER (NVENCAPI_STRUCT_VERSION(6) | ( 1<<31 )) + + +/** + * \struct _NV_ENC_INITIALIZE_PARAMS + * Encode Session Initialization parameters. + */ +typedef struct _NV_ENC_INITIALIZE_PARAMS +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_INITIALIZE_PARAMS_VER. */ + GUID encodeGUID; /**< [in]: Specifies the Encode GUID for which the encoder is being created. ::NvEncInitializeEncoder() API will fail if this is not set, or set to unsupported value. */ + GUID presetGUID; /**< [in]: Specifies the preset for encoding. If the preset GUID is set then , the preset configuration will be applied before any other parameter. */ + uint32_t encodeWidth; /**< [in]: Specifies the encode width. If not set ::NvEncInitializeEncoder() API will fail. */ + uint32_t encodeHeight; /**< [in]: Specifies the encode height. If not set ::NvEncInitializeEncoder() API will fail. */ + uint32_t darWidth; /**< [in]: Specifies the display aspect ratio Width. */ + uint32_t darHeight; /**< [in]: Specifies the display aspect ratio height. */ + uint32_t frameRateNum; /**< [in]: Specifies the numerator for frame rate used for encoding in frames per second ( Frame rate = frameRateNum / frameRateDen ). */ + uint32_t frameRateDen; /**< [in]: Specifies the denominator for frame rate used for encoding in frames per second ( Frame rate = frameRateNum / frameRateDen ). */ + uint32_t enableEncodeAsync; /**< [in]: Set this to 1 to enable asynchronous mode and is expected to use events to get picture completion notification. */ + uint32_t enablePTD; /**< [in]: Set this to 1 to enable the Picture Type Decision is be taken by the NvEncodeAPI interface. */ + uint32_t reportSliceOffsets :1; /**< [in]: Set this to 1 to enable reporting slice offsets in ::_NV_ENC_LOCK_BITSTREAM. NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync must be set to 0 to use this feature. Client must set this to 0 if NV_ENC_CONFIG_H264::sliceMode is 1 on Kepler GPUs */ + uint32_t enableSubFrameWrite :1; /**< [in]: Set this to 1 to write out available bitstream to memory at subframe intervals */ + uint32_t enableExternalMEHints :1; /**< [in]: Set to 1 to enable external ME hints for the current frame. For NV_ENC_INITIALIZE_PARAMS::enablePTD=1 with B frames, programming L1 hints is optional for B frames since Client doesn't know internal GOP structure. + NV_ENC_PIC_PARAMS::meHintRefPicDist should preferably be set with enablePTD=1. */ + uint32_t enableMEOnlyMode :1; /**< [in]: Set to 1 to enable ME Only Mode .*/ + uint32_t reservedBitFields :28; /**< [in]: Reserved bitfields and must be set to 0 */ + uint32_t privDataSize; /**< [in]: Reserved private data buffer size and must be set to 0 */ + void* privData; /**< [in]: Reserved private data buffer and must be set to NULL */ + NV_ENC_CONFIG* encodeConfig; /**< [in]: Specifies the advanced codec specific structure. If client has sent a valid codec config structure, it will override parameters set by the NV_ENC_INITIALIZE_PARAMS::presetGUID parameter. 
If set to NULL the NvEncodeAPI interface will use the NV_ENC_INITIALIZE_PARAMS::presetGUID to set the codec specific parameters. + Client can also optionally query the NvEncodeAPI interface to get codec specific parameters for a presetGUID using ::NvEncGetEncodePresetConfig() API. It can then modify (if required) some of the codec config parameters and send down a custom config structure as part of ::_NV_ENC_INITIALIZE_PARAMS. + Even in this case client is recommended to pass the same preset guid it has used in ::NvEncGetEncodePresetConfig() API to query the config structure; as NV_ENC_INITIALIZE_PARAMS::presetGUID. This will not override the custom config structure but will be used to determine other Encoder HW specific parameters not exposed in the API. */ + uint32_t maxEncodeWidth; /**< [in]: Maximum encode width to be used for current Encode session. + Client should allocate output buffers according to this dimension for dynamic resolution change. If set to 0, Encoder will not allow dynamic resolution change. */ + uint32_t maxEncodeHeight; /**< [in]: Maximum encode height to be allowed for current Encode session. + Client should allocate output buffers according to this dimension for dynamic resolution change. If set to 0, Encode will not allow dynamic resolution change. */ + NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE maxMEHintCountsPerBlock[2]; /**< [in]: If Client wants to pass external motion vectors in NV_ENC_PIC_PARAMS::meExternalHints buffer it must specify the maximum number of hint candidates per block per direction for the encode session. + The NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[0] is for L0 predictors and NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[1] is for L1 predictors. + This client must also set NV_ENC_INITIALIZE_PARAMS::enableExternalMEHints to 1. */ + uint32_t reserved [289]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_INITIALIZE_PARAMS; + +/** macro for constructing the version field of ::_NV_ENC_INITIALIZE_PARAMS */ +#define NV_ENC_INITIALIZE_PARAMS_VER (NVENCAPI_STRUCT_VERSION(5) | ( 1<<31 )) + + +/** + * \struct _NV_ENC_RECONFIGURE_PARAMS + * Encode Session Reconfigured parameters. + */ +typedef struct _NV_ENC_RECONFIGURE_PARAMS +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_RECONFIGURE_PARAMS_VER. */ + NV_ENC_INITIALIZE_PARAMS reInitEncodeParams; /**< [in]: Encoder session re-initialization parameters. */ + uint32_t resetEncoder :1; /**< [in]: This resets the rate control states and other internal encoder states. This should be used only with an IDR frame. + If NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1, encoder will force the frame type to IDR */ + uint32_t forceIDR :1; /**< [in]: Encode the current picture as an IDR picture. This flag is only valid when Picture type decision is taken by the Encoder + [_NV_ENC_INITIALIZE_PARAMS::enablePTD == 1]. */ + uint32_t reserved :30; + +}NV_ENC_RECONFIGURE_PARAMS; + +/** macro for constructing the version field of ::_NV_ENC_RECONFIGURE_PARAMS */ +#define NV_ENC_RECONFIGURE_PARAMS_VER (NVENCAPI_STRUCT_VERSION(1) | ( 1<<31 )) + +/** + * \struct _NV_ENC_PRESET_CONFIG + * Encoder preset config + */ +typedef struct _NV_ENC_PRESET_CONFIG +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_PRESET_CONFIG_VER. */ + NV_ENC_CONFIG presetCfg; /**< [out]: preset config returned by the Nvidia Video Encoder interface. 
*/ + uint32_t reserved1[255]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +}NV_ENC_PRESET_CONFIG; + +/** macro for constructing the version field of ::_NV_ENC_PRESET_CONFIG */ +#define NV_ENC_PRESET_CONFIG_VER (NVENCAPI_STRUCT_VERSION(4) | ( 1<<31 )) + + +/** + * \struct _NV_ENC_SEI_PAYLOAD + * User SEI message + */ +typedef struct _NV_ENC_SEI_PAYLOAD +{ + uint32_t payloadSize; /**< [in] SEI payload size in bytes. SEI payload must be byte aligned, as described in Annex D */ + uint32_t payloadType; /**< [in] SEI payload types and syntax can be found in Annex D of the H.264 Specification. */ + uint8_t *payload; /**< [in] pointer to user data */ +} NV_ENC_SEI_PAYLOAD; + +#define NV_ENC_H264_SEI_PAYLOAD NV_ENC_SEI_PAYLOAD + +/** + * \struct _NV_ENC_PIC_PARAMS_H264 + * H264 specific enc pic params. sent on a per frame basis. + */ +typedef struct _NV_ENC_PIC_PARAMS_H264 +{ + uint32_t displayPOCSyntax; /**< [in]: Specifies the display POC syntax This is required to be set if client is handling the picture type decision. */ + uint32_t reserved3; /**< [in]: Reserved and must be set to 0 */ + uint32_t refPicFlag; /**< [in]: Set to 1 for a reference picture. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */ + uint32_t colourPlaneId; /**< [in]: Specifies the colour plane ID associated with the current input. */ + uint32_t forceIntraRefreshWithFrameCnt; /**< [in]: Forces an intra refresh with duration equal to intraRefreshFrameCnt. + When outputRecoveryPointSEI is set this is value is used for recovery_frame_cnt in recovery point SEI message + forceIntraRefreshWithFrameCnt cannot be used if B frames are used in the GOP structure specified */ + uint32_t constrainedFrame :1; /**< [in]: Set to 1 if client wants to encode this frame with each slice completely independent of other slices in the frame. + NV_ENC_INITIALIZE_PARAMS::enableConstrainedEncoding should be set to 1 */ + uint32_t sliceModeDataUpdate :1; /**< [in]: Set to 1 if client wants to change the sliceModeData field to specify new sliceSize Parameter + When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting */ + uint32_t ltrMarkFrame :1; /**< [in]: Set to 1 if client wants to mark this frame as LTR */ + uint32_t ltrUseFrames :1; /**< [in]: Set to 1 if client allows encoding this frame using the LTR frames specified in ltrFrameBitmap */ + uint32_t reservedBitFields :28; /**< [in]: Reserved bit fields and must be set to 0 */ + uint8_t* sliceTypeData; /**< [in]: Deprecated. */ + uint32_t sliceTypeArrayCnt; /**< [in]: Deprecated. */ + uint32_t seiPayloadArrayCnt; /**< [in]: Specifies the number of elements allocated in seiPayloadArray array. */ + NV_ENC_SEI_PAYLOAD* seiPayloadArray; /**< [in]: Array of SEI payloads which will be inserted for this frame. */ + uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices + sliceMode = 0 MB based slices, sliceMode = 1 Byte based slices, sliceMode = 2 MB row based slices, sliceMode = 3, numSlices in Picture + When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting + When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */ + uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. 
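The encodeConfig description above spells out the recommended initialization flow: fetch the preset configuration with ::NvEncGetEncodePresetConfig() into an NV_ENC_PRESET_CONFIG, adjust it, and hand it back through NV_ENC_INITIALIZE_PARAMS while still passing the same presetGUID. A compressed sketch of that flow, under the usual fn/enc assumptions; the resolution, frame rate and HQ preset choice are placeholders, and the codec/preset GUIDs themselves are defined elsewhere in the header.

#include "nvEncodeAPI.h"

/* Sketch: open a 1920x1080 H.264 session from a preset, with one tweak on top. */
static NVENCSTATUS init_session(NV_ENCODE_API_FUNCTION_LIST *fn, void *enc)
{
    NV_ENC_PRESET_CONFIG preset   = { 0 };
    NV_ENC_INITIALIZE_PARAMS init = { 0 };
    NVENCSTATUS st;

    preset.version           = NV_ENC_PRESET_CONFIG_VER;
    preset.presetCfg.version = NV_ENC_CONFIG_VER;
    st = fn->nvEncGetEncodePresetConfig(enc, NV_ENC_CODEC_H264_GUID,
                                        NV_ENC_PRESET_HQ_GUID, &preset);
    if (st != NV_ENC_SUCCESS)
        return st;

    preset.presetCfg.gopLength = 120;           /* example tweak on the preset    */

    init.version      = NV_ENC_INITIALIZE_PARAMS_VER;
    init.encodeGUID   = NV_ENC_CODEC_H264_GUID;
    init.presetGUID   = NV_ENC_PRESET_HQ_GUID;  /* keep the GUID that was queried */
    init.encodeWidth  = 1920;
    init.encodeHeight = 1080;
    init.darWidth     = 1920;
    init.darHeight    = 1080;
    init.frameRateNum = 30;
    init.frameRateDen = 1;
    init.enablePTD    = 1;                      /* driver decides picture types   */
    init.encodeConfig = &preset.presetCfg;      /* overrides the preset defaults  */

    return fn->nvEncInitializeEncoder(enc, &init);
}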
For: + sliceMode = 0, sliceModeData specifies # of MBs in each slice (except last slice) + sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice) + sliceMode = 2, sliceModeData specifies # of MB rows in each slice (except last slice) + sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */ + uint32_t ltrMarkFrameIdx; /**< [in]: Specifies the long term referenceframe index to use for marking this frame as LTR.*/ + uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the the associated bitmap of LTR frame indices when encoding this frame. */ + uint32_t ltrUsageMode; /**< [in]: Specifies additional usage constraints for encoding using LTR frames from this point further. 0: no constraints, 1: no short term refs older than current, no previous LTR frames.*/ + uint32_t reserved [243]; /**< [in]: Reserved and must be set to 0. */ + void* reserved2[62]; /**< [in]: Reserved and must be set to NULL. */ +} NV_ENC_PIC_PARAMS_H264; + +/** + * \struct _NV_ENC_PIC_PARAMS_HEVC + * HEVC specific enc pic params. sent on a per frame basis. + */ +typedef struct _NV_ENC_PIC_PARAMS_HEVC +{ + uint32_t displayPOCSyntax; /**< [in]: Specifies the display POC syntax This is required to be set if client is handling the picture type decision. */ + uint32_t refPicFlag; /**< [in]: Set to 1 for a reference picture. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */ + uint32_t temporalId; /**< [in]: Specifies the temporal id of the picture */ + uint32_t forceIntraRefreshWithFrameCnt; /**< [in]: Forces an intra refresh with duration equal to intraRefreshFrameCnt. + When outputRecoveryPointSEI is set this is value is used for recovery_frame_cnt in recovery point SEI message + forceIntraRefreshWithFrameCnt cannot be used if B frames are used in the GOP structure specified */ + uint32_t constrainedFrame :1; /**< [in]: Set to 1 if client wants to encode this frame with each slice completely independent of other slices in the frame. + NV_ENC_INITIALIZE_PARAMS::enableConstrainedEncoding should be set to 1 */ + uint32_t sliceModeDataUpdate :1; /**< [in]: Set to 1 if client wants to change the sliceModeData field to specify new sliceSize Parameter + When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting */ + uint32_t ltrMarkFrame :1; /**< [in]: Set to 1 if client wants to mark this frame as LTR */ + uint32_t ltrUseFrames :1; /**< [in]: Set to 1 if client allows encoding this frame using the LTR frames specified in ltrFrameBitmap */ + uint32_t reservedBitFields :28; /**< [in]: Reserved bit fields and must be set to 0 */ + uint8_t* sliceTypeData; /**< [in]: Array which specifies the slice type used to force intra slice for a particular slice. Currently supported only for NV_ENC_CONFIG_H264::sliceMode == 3. + Client should allocate array of size sliceModeData where sliceModeData is specified in field of ::_NV_ENC_CONFIG_H264 + Array element with index n corresponds to nth slice. To force a particular slice to intra client should set corresponding array element to NV_ENC_SLICE_TYPE_I + all other array elements should be set to NV_ENC_SLICE_TYPE_DEFAULT */ + uint32_t sliceTypeArrayCnt; /**< [in]: Client should set this to the number of elements allocated in sliceTypeData array. 
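NV_ENC_SEI_PAYLOAD together with the seiPayloadArray/seiPayloadArrayCnt pair in the H.264 per-picture parameters above lets the client inject its own SEI messages on any frame. A tiny sketch; the payload type 5 (user_data_unregistered) and the buffer contents are illustrative, and the memory must stay valid until the frame has been encoded.

#include <stdint.h>
#include "nvEncodeAPI.h"

/* Sketch: attach one user SEI message to an H.264 frame. */
static void attach_sei(NV_ENC_PIC_PARAMS_H264 *h264pic, NV_ENC_SEI_PAYLOAD *sei,
                       uint8_t *data, uint32_t size)
{
    sei->payloadSize = size;   /* byte-aligned payload, as per Annex D */
    sei->payloadType = 5;      /* user_data_unregistered               */
    sei->payload     = data;

    h264pic->seiPayloadArray    = sei;
    h264pic->seiPayloadArrayCnt = 1;
}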
If sliceTypeData is NULL then this should be set to 0 */ + uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices + sliceMode = 0 CTU based slices, sliceMode = 1 Byte based slices, sliceMode = 2 CTU row based slices, sliceMode = 3, numSlices in Picture + When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting + When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */ + uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For: + sliceMode = 0, sliceModeData specifies # of CTUs in each slice (except last slice) + sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice) + sliceMode = 2, sliceModeData specifies # of CTU rows in each slice (except last slice) + sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */ + uint32_t ltrMarkFrameIdx; /**< [in]: Specifies the long term reference frame index to use for marking this frame as LTR.*/ + uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices when encoding this frame. */ + uint32_t ltrUsageMode; /**< [in]: Specifies additional usage constraints for encoding using LTR frames from this point further. 0: no constraints, 1: no short term refs older than current, no previous LTR frames.*/ + uint32_t seiPayloadArrayCnt; /**< [in]: Specifies the number of elements allocated in seiPayloadArray array. */ + uint32_t reserved; /**< [in]: Reserved and must be set to 0. */ + NV_ENC_SEI_PAYLOAD* seiPayloadArray; /**< [in]: Array of SEI payloads which will be inserted for this frame. */ + uint32_t reserved2 [244]; /**< [in]: Reserved and must be set to 0. */ + void* reserved3[61]; /**< [in]: Reserved and must be set to NULL. */ +} NV_ENC_PIC_PARAMS_HEVC; + + +/** + * Codec specific per-picture encoding parameters. + */ +typedef union _NV_ENC_CODEC_PIC_PARAMS +{ + NV_ENC_PIC_PARAMS_H264 h264PicParams; /**< [in]: H264 encode picture params. */ + NV_ENC_PIC_PARAMS_HEVC hevcPicParams; /**< [in]: HEVC encode picture params. Currently unsupported and must not to be used. */ + uint32_t reserved[256]; /**< [in]: Reserved and must be set to 0. */ +} NV_ENC_CODEC_PIC_PARAMS; + +/** + * \struct _NV_ENC_PIC_PARAMS + * Encoding parameters that need to be sent on a per frame basis. + */ +typedef struct _NV_ENC_PIC_PARAMS +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_PIC_PARAMS_VER. */ + uint32_t inputWidth; /**< [in]: Specifies the input buffer width */ + uint32_t inputHeight; /**< [in]: Specifies the input buffer height */ + uint32_t inputPitch; /**< [in]: Specifies the input buffer pitch. If pitch value is not known, set this to inputWidth. */ + uint32_t encodePicFlags; /**< [in]: Specifies bit-wise OR`ed encode pic flags. See ::NV_ENC_PIC_FLAGS enum. */ + uint32_t frameIdx; /**< [in]: Specifies the frame index associated with the input frame [optional]. */ + uint64_t inputTimeStamp; /**< [in]: Specifies presentation timestamp associated with the input picture. */ + uint64_t inputDuration; /**< [in]: Specifies duration of the input picture */ + NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Specifies the input buffer pointer. Client must use a pointer obtained from ::NvEncCreateInputBuffer() or ::NvEncMapInputResource() APIs.*/ + NV_ENC_OUTPUT_PTR outputBitstream; /**< [in]: Specifies the pointer to output buffer. 
Client should use a pointer obtained from ::NvEncCreateBitstreamBuffer() API. */ + void* completionEvent; /**< [in]: Specifies an event to be signalled on completion of encoding of this Frame [only if operating in Asynchronous mode]. Each output buffer should be associated with a distinct event pointer. */ + NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Specifies the input buffer format. */ + NV_ENC_PIC_STRUCT pictureStruct; /**< [in]: Specifies structure of the input picture. */ + NV_ENC_PIC_TYPE pictureType; /**< [in]: Specifies input picture type. Client required to be set explicitly by the client if the client has not set NV_ENC_INITALIZE_PARAMS::enablePTD to 1 while calling NvInitializeEncoder. */ + NV_ENC_CODEC_PIC_PARAMS codecPicParams; /**< [in]: Specifies the codec specific per-picture encoding parameters. */ + NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE meHintCountsPerBlock[2]; /**< [in]: Specifies the number of hint candidates per block per direction for the current frame. meHintCountsPerBlock[0] is for L0 predictors and meHintCountsPerBlock[1] is for L1 predictors. + The candidate count in NV_ENC_PIC_PARAMS::meHintCountsPerBlock[lx] must never exceed NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[lx] provided during encoder intialization. */ + NVENC_EXTERNAL_ME_HINT *meExternalHints; /**< [in]: Specifies the pointer to ME external hints for the current frame. The size of ME hint buffer should be equal to number of macroblocks multiplied by the total number of candidates per macroblock. + The total number of candidates per MB per direction = 1*meHintCountsPerBlock[Lx].numCandsPerBlk16x16 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk16x8 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk8x8 + + 4*meHintCountsPerBlock[Lx].numCandsPerBlk8x8. For frames using bidirectional ME , the total number of candidates for single macroblock is sum of total number of candidates per MB for each direction (L0 and L1) */ + uint32_t reserved1[6]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[2]; /**< [in]: Reserved and must be set to NULL */ + int8_t *qpDeltaMap; /**< [in]: Specifies the pointer to signed byte array containing QP delta value per MB in raster scan order in the current picture. This QP modifier is applied on top of the QP chosen by rate control. */ + uint32_t qpDeltaMapSize; /**< [in]: Specifies the size in bytes of qpDeltaMap surface allocated by client and pointed to by NV_ENC_PIC_PARAMS::qpDeltaMap. Surface (array) should be picWidthInMbs * picHeightInMbs */ + uint32_t reservedBitFields; /**< [in]: Reserved bitfields and must be set to 0 */ + uint16_t meHintRefPicDist[2]; /**< [in]: Specifies temporal distance for reference picture (NVENC_EXTERNAL_ME_HINT::refidx = 0) used during external ME with NV_ENC_INITALIZE_PARAMS::enablePTD = 1 . meHintRefPicDist[0] is for L0 hints and meHintRefPicDist[1] is for L1 hints. + If not set, will internally infer distance of 1. Ignored for NV_ENC_INITALIZE_PARAMS::enablePTD = 0 */ + uint32_t reserved3[286]; /**< [in]: Reserved and must be set to 0 */ + void* reserved4[60]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_PIC_PARAMS; + +/** Macro for constructing the version field of ::_NV_ENC_PIC_PARAMS */ +#define NV_ENC_PIC_PARAMS_VER (NVENCAPI_STRUCT_VERSION(4) | ( 1<<31 )) + + +/** + * \struct _NV_ENC_MEONLY_PARAMS + * MEOnly parameters that need to be sent on a per motion estimation basis. + */ +typedef struct _NV_ENC_MEONLY_PARAMS +{ + uint32_t version; /**< [in]: Struct version. 
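End of stream goes through the same per-frame structure: a final NV_ENC_PIC_PARAMS with no input buffer and ::NV_ENC_PIC_FLAG_EOS set flushes whatever the driver is still holding for reordering. A sketch under the usual fn/enc assumptions.

#include "nvEncodeAPI.h"

/* Sketch: flush the encoder at end of stream. */
static NVENCSTATUS flush_encoder(NV_ENCODE_API_FUNCTION_LIST *fn, void *enc)
{
    NV_ENC_PIC_PARAMS pic = { 0 };

    pic.version        = NV_ENC_PIC_PARAMS_VER;
    pic.encodePicFlags = NV_ENC_PIC_FLAG_EOS;   /* no input buffer attached */

    return fn->nvEncEncodePicture(enc, &pic);
}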
Must be set to NV_ENC_MEONLY_PARAMS_VER.*/ + uint32_t inputWidth; /**< [in]: Specifies the input buffer width */ + uint32_t inputHeight; /**< [in]: Specifies the input buffer height */ + NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Specifies the input buffer pointer. Client must use a pointer obtained from NvEncCreateInputBuffer() or NvEncMapInputResource() APIs. */ + NV_ENC_INPUT_PTR referenceFrame; /**< [in]: Specifies the reference frame pointer */ + NV_ENC_OUTPUT_PTR mvBuffer; /**< [in]: Specifies the pointer to motion vector data buffer allocated by NvEncCreateMVBuffer. Client must lock mvBuffer using ::NvEncLockBitstream() API to get the motion vector data. */ + NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Specifies the input buffer format. */ + void* completionEvent; /**< [in]: Specifies an event to be signalled on completion of motion estimation + of this Frame [only if operating in Asynchronous mode]. + Each output buffer should be associated with a distinct event pointer. */ + uint32_t reserved1[252]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[60]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_MEONLY_PARAMS; + +/** NV_ENC_MEONLY_PARAMS struct version*/ +#define NV_ENC_MEONLY_PARAMS_VER NVENCAPI_STRUCT_VERSION(2) + + +/** + * \struct _NV_ENC_LOCK_BITSTREAM + * Bitstream buffer lock parameters. + */ +typedef struct _NV_ENC_LOCK_BITSTREAM +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_LOCK_BITSTREAM_VER. */ + uint32_t doNotWait :1; /**< [in]: If this flag is set, the NvEncodeAPI interface will return buffer pointer even if operation is not completed. If not set, the call will block until operation completes. */ + uint32_t ltrFrame :1; /**< [out]: Flag indicating this frame is marked as LTR frame */ + uint32_t reservedBitFields :30; /**< [in]: Reserved bit fields and must be set to 0 */ + void* outputBitstream; /**< [in]: Pointer to the bitstream buffer being locked. */ + uint32_t* sliceOffsets; /**< [in,out]: Array which receives the slice offsets. This is not supported if NV_ENC_CONFIG_H264::sliceMode is 1 on Kepler GPUs. Array size must be equal to size of frame in MBs. */ + uint32_t frameIdx; /**< [out]: Frame no. for which the bitstream is being retrieved. */ + uint32_t hwEncodeStatus; /**< [out]: The NvEncodeAPI interface status for the locked picture. */ + uint32_t numSlices; /**< [out]: Number of slices in the encoded picture. Will be reported only if NV_ENC_INITIALIZE_PARAMS::reportSliceOffsets set to 1. */ + uint32_t bitstreamSizeInBytes; /**< [out]: Actual number of bytes generated and copied to the memory pointed by bitstreamBufferPtr. */ + uint64_t outputTimeStamp; /**< [out]: Presentation timestamp associated with the encoded output. */ + uint64_t outputDuration; /**< [out]: Presentation duration associates with the encoded output. */ + void* bitstreamBufferPtr; /**< [out]: Pointer to the generated output bitstream. + For MEOnly mode _NV_ENC_LOCK_BITSTREAM::bitstreamBufferPtr should be typecast to + NV_ENC_H264_MV_DATA/NV_ENC_HEVC_MV_DATA pointer respectively for H264/HEVC */ + NV_ENC_PIC_TYPE pictureType; /**< [out]: Picture type of the encoded picture. */ + NV_ENC_PIC_STRUCT pictureStruct; /**< [out]: Structure of the generated output picture. */ + uint32_t frameAvgQP; /**< [out]: Average QP of the frame. */ + uint32_t frameSatd; /**< [out]: Total SATD cost for whole frame. */ + uint32_t ltrFrameIdx; /**< [out]: Frame index associated with this LTR frame. 
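Retrieving encoded output is a lock/copy/unlock cycle on NV_ENC_LOCK_BITSTREAM: with doNotWait left at 0 the lock blocks until the picture is finished, and bitstreamBufferPtr plus bitstreamSizeInBytes then describe the payload. A sketch with the same fn/enc assumptions, writing to a plain FILE* for illustration only.

#include <stdio.h>
#include "nvEncodeAPI.h"

/* Sketch: blocking retrieval of one encoded picture. */
static NVENCSTATUS drain_one(NV_ENCODE_API_FUNCTION_LIST *fn, void *enc,
                             NV_ENC_OUTPUT_PTR out, FILE *f)
{
    NV_ENC_LOCK_BITSTREAM lock = { 0 };
    NVENCSTATUS st;

    lock.version         = NV_ENC_LOCK_BITSTREAM_VER;
    lock.outputBitstream = out;
    lock.doNotWait       = 0;            /* block until encoding completes */

    st = fn->nvEncLockBitstream(enc, &lock);
    if (st != NV_ENC_SUCCESS)
        return st;

    fwrite(lock.bitstreamBufferPtr, 1, lock.bitstreamSizeInBytes, f);
    return fn->nvEncUnlockBitstream(enc, out);
}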
*/ + uint32_t ltrFrameBitmap; /**< [out]: Bitmap of LTR frames indices which were used for encoding this frame. Value of 0 if no LTR frames were used. */ + uint32_t reserved [236]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_LOCK_BITSTREAM; + +/** Macro for constructing the version field of ::_NV_ENC_LOCK_BITSTREAM */ +#define NV_ENC_LOCK_BITSTREAM_VER NVENCAPI_STRUCT_VERSION(1) + + +/** + * \struct _NV_ENC_LOCK_INPUT_BUFFER + * Uncompressed Input Buffer lock parameters. + */ +typedef struct _NV_ENC_LOCK_INPUT_BUFFER +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_LOCK_INPUT_BUFFER_VER. */ + uint32_t doNotWait :1; /**< [in]: Set to 1 to make ::NvEncLockInputBuffer() a unblocking call. If the encoding is not completed, driver will return ::NV_ENC_ERR_ENCODER_BUSY error code. */ + uint32_t reservedBitFields :31; /**< [in]: Reserved bitfields and must be set to 0 */ + NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Pointer to the input buffer to be locked, client should pass the pointer obtained from ::NvEncCreateInputBuffer() or ::NvEncMapInputResource API. */ + void* bufferDataPtr; /**< [out]: Pointed to the locked input buffer data. Client can only access input buffer using the \p bufferDataPtr. */ + uint32_t pitch; /**< [out]: Pitch of the locked input buffer. */ + uint32_t reserved1[251]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_LOCK_INPUT_BUFFER; + +/** Macro for constructing the version field of ::_NV_ENC_LOCK_INPUT_BUFFER */ +#define NV_ENC_LOCK_INPUT_BUFFER_VER NVENCAPI_STRUCT_VERSION(1) + + +/** + * \struct _NV_ENC_MAP_INPUT_RESOURCE + * Map an input resource to a Nvidia Encoder Input Buffer + */ +typedef struct _NV_ENC_MAP_INPUT_RESOURCE +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_MAP_INPUT_RESOURCE_VER. */ + uint32_t subResourceIndex; /**< [in]: Deprecated. Do not use. */ + void* inputResource; /**< [in]: Deprecated. Do not use. */ + NV_ENC_REGISTERED_PTR registeredResource; /**< [in]: The Registered resource handle obtained by calling NvEncRegisterInputResource. */ + NV_ENC_INPUT_PTR mappedResource; /**< [out]: Mapped pointer corresponding to the registeredResource. This pointer must be used in NV_ENC_PIC_PARAMS::inputBuffer parameter in ::NvEncEncodePicture() API. */ + NV_ENC_BUFFER_FORMAT mappedBufferFmt; /**< [out]: Buffer format of the outputResource. This buffer format must be used in NV_ENC_PIC_PARAMS::bufferFmt if client using the above mapped resource pointer. */ + uint32_t reserved1[251]; /**< [in]: Reserved and must be set to 0. */ + void* reserved2[63]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_MAP_INPUT_RESOURCE; + +/** Macro for constructing the version field of ::_NV_ENC_MAP_INPUT_RESOURCE */ +#define NV_ENC_MAP_INPUT_RESOURCE_VER NVENCAPI_STRUCT_VERSION(4) + +/** + * \struct _NV_ENC_REGISTER_RESOURCE + * Register a resource for future use with the Nvidia Video Encoder Interface. + */ +typedef struct _NV_ENC_REGISTER_RESOURCE +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_REGISTER_RESOURCE_VER. */ + NV_ENC_INPUT_RESOURCE_TYPE resourceType; /**< [in]: Specifies the type of resource to be registered. Supported values are ::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX, ::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR. */ + uint32_t width; /**< [in]: Input buffer Width. */ + uint32_t height; /**< [in]: Input buffer Height. 
*/ + uint32_t pitch; /**< [in]: Input buffer Pitch. */ + uint32_t subResourceIndex; /**< [in]: Subresource Index of the DirectX resource to be registered. Should be set to 0 for other interfaces. */ + void* resourceToRegister; /**< [in]: Handle to the resource that is being registered. */ + NV_ENC_REGISTERED_PTR registeredResource; /**< [out]: Registered resource handle. This should be used in future interactions with the Nvidia Video Encoder Interface. */ + NV_ENC_BUFFER_FORMAT bufferFormat; /**< [in]: Buffer format of resource to be registered. */ + uint32_t reserved1[248]; /**< [in]: Reserved and must be set to 0. */ + void* reserved2[62]; /**< [in]: Reserved and must be set to NULL. */ +} NV_ENC_REGISTER_RESOURCE; + +/** Macro for constructing the version field of ::_NV_ENC_REGISTER_RESOURCE */ +#define NV_ENC_REGISTER_RESOURCE_VER NVENCAPI_STRUCT_VERSION(3) + +/** + * \struct _NV_ENC_STAT + * Encode Stats structure. + */ +typedef struct _NV_ENC_STAT +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_STAT_VER. */ + uint32_t reserved; /**< [in]: Reserved and must be set to 0 */ + NV_ENC_OUTPUT_PTR outputBitStream; /**< [out]: Specifies the pointer to output bitstream. */ + uint32_t bitStreamSize; /**< [out]: Size of generated bitstream in bytes. */ + uint32_t picType; /**< [out]: Picture type of encoded picture. See ::NV_ENC_PIC_TYPE. */ + uint32_t lastValidByteOffset; /**< [out]: Offset of last valid bytes of completed bitstream */ + uint32_t sliceOffsets[16]; /**< [out]: Offsets of each slice */ + uint32_t picIdx; /**< [out]: Picture number */ + uint32_t reserved1[233]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_STAT; + +/** Macro for constructing the version field of ::_NV_ENC_STAT */ +#define NV_ENC_STAT_VER NVENCAPI_STRUCT_VERSION(1) + + +/** + * \struct _NV_ENC_SEQUENCE_PARAM_PAYLOAD + * Sequence and picture paramaters payload. + */ +typedef struct _NV_ENC_SEQUENCE_PARAM_PAYLOAD +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_INITIALIZE_PARAMS_VER. */ + uint32_t inBufferSize; /**< [in]: Specifies the size of the spsppsBuffer provied by the client */ + uint32_t spsId; /**< [in]: Specifies the SPS id to be used in sequence header. Default value is 0. */ + uint32_t ppsId; /**< [in]: Specifies the PPS id to be used in picture header. Default value is 0. */ + void* spsppsBuffer; /**< [in]: Specifies bitstream header pointer of size NV_ENC_SEQUENCE_PARAM_PAYLOAD::inBufferSize. It is the client's responsibility to manage this memory. */ + uint32_t* outSPSPPSPayloadSize; /**< [out]: Size of the sequence and picture header in bytes written by the NvEncodeAPI interface to the SPSPPSBuffer. */ + uint32_t reserved [250]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_SEQUENCE_PARAM_PAYLOAD; + +/** Macro for constructing the version field of ::_NV_ENC_SEQUENCE_PARAM_PAYLOAD */ +#define NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER NVENCAPI_STRUCT_VERSION(1) + + +/** + * Event registration/unregistration parameters. + */ +typedef struct _NV_ENC_EVENT_PARAMS +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_EVENT_PARAMS_VER. */ + uint32_t reserved; /**< [in]: Reserved and must be set to 0 */ + void* completionEvent; /**< [in]: Handle to event to be registered/unregistered with the NvEncodeAPI interface. 
*/ + uint32_t reserved1[253]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_EVENT_PARAMS; + +/** Macro for constructing the version field of ::_NV_ENC_EVENT_PARAMS */ +#define NV_ENC_EVENT_PARAMS_VER NVENCAPI_STRUCT_VERSION(1) + +/** + * Encoder Session Creation parameters + */ +typedef struct _NV_ENC_OPEN_ENCODE_SESSIONEX_PARAMS +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER. */ + NV_ENC_DEVICE_TYPE deviceType; /**< [in]: Specified the device Type */ + void* device; /**< [in]: Pointer to client device. */ + void* reserved; /**< [in]: Reserved and must be set to 0. */ + uint32_t apiVersion; /**< [in]: API version. Should be set to NVENCAPI_VERSION. */ + uint32_t reserved1[253]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS; +/** Macro for constructing the version field of ::_NV_ENC_OPEN_ENCODE_SESSIONEX_PARAMS */ +#define NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER NVENCAPI_STRUCT_VERSION(1) + +/** @} */ /* END ENCODER_STRUCTURE */ + + +/** + * \addtogroup ENCODE_FUNC NvEncodeAPI Functions + * @{ + */ + +// NvEncOpenEncodeSession +/** + * \brief Opens an encoding session. + * + * Deprecated. + * + * \return + * ::NV_ENC_ERR_INVALID_CALL\n + * + */ +NVENCSTATUS NVENCAPI NvEncOpenEncodeSession (void* device, uint32_t deviceType, void** encoder); + +// NvEncGetEncodeGuidCount +/** + * \brief Retrieves the number of supported encode GUIDs. + * + * The function returns the number of codec guids supported by the NvEncodeAPI + * interface. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [out] encodeGUIDCount + * Number of supported encode GUIDs. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetEncodeGUIDCount (void* encoder, uint32_t* encodeGUIDCount); + + +// NvEncGetEncodeGUIDs +/** + * \brief Retrieves an array of supported encoder codec GUIDs. + * + * The function returns an array of codec guids supported by the NvEncodeAPI interface. + * The client must allocate an array where the NvEncodeAPI interface can + * fill the supported guids and pass the pointer in \p *GUIDs parameter. + * The size of the array can be determined by using ::NvEncGetEncodeGUIDCount() API. + * The Nvidia Encoding interface returns the number of codec guids it has actually + * filled in the guid array in the \p GUIDCount parameter. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] guidArraySize + * Number of GUIDs to retrieved. Should be set to the number retrieved using + * ::NvEncGetEncodeGUIDCount. + * \param [out] GUIDs + * Array of supported Encode GUIDs. + * \param [out] GUIDCount + * Number of supported Encode GUIDs. 
+ * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetEncodeGUIDs (void* encoder, GUID* GUIDs, uint32_t guidArraySize, uint32_t* GUIDCount); + + +// NvEncGetEncodeProfileGuidCount +/** + * \brief Retrieves the number of supported profile GUIDs. + * + * The function returns the number of profile GUIDs supported for a given codec. + * The client must first enumerate the codec guids supported by the NvEncodeAPI + * interface. After determining the codec guid, it can query the NvEncodeAPI + * interface to determine the number of profile guids supported for a particular + * codec guid. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] encodeGUID + * The codec guid for which the profile guids are being enumerated. + * \param [out] encodeProfileGUIDCount + * Number of encode profiles supported for the given encodeGUID. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetEncodeProfileGUIDCount (void* encoder, GUID encodeGUID, uint32_t* encodeProfileGUIDCount); + + +// NvEncGetEncodeProfileGUIDs +/** + * \brief Retrieves an array of supported encode profile GUIDs. + * + * The function returns an array of supported profile guids for a particular + * codec guid. The client must allocate an array where the NvEncodeAPI interface + * can populate the profile guids. The client can determine the array size using + * ::NvEncGetEncodeProfileGUIDCount() API. The client must also validiate that the + * NvEncodeAPI interface supports the GUID the client wants to pass as \p encodeGUID + * parameter. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] encodeGUID + * The encode guid whose profile guids are being enumerated. + * \param [in] guidArraySize + * Number of GUIDs to be retrieved. Should be set to the number retrieved using + * ::NvEncGetEncodeProfileGUIDCount. + * \param [out] profileGUIDs + * Array of supported Encode Profile GUIDs + * \param [out] GUIDCount + * Number of valid encode profile GUIDs in \p profileGUIDs array. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetEncodeProfileGUIDs (void* encoder, GUID encodeGUID, GUID* profileGUIDs, uint32_t guidArraySize, uint32_t* GUIDCount); + +// NvEncGetInputFormatCount +/** + * \brief Retrieve the number of supported Input formats. + * + * The function returns the number of supported input formats. The client must + * query the NvEncodeAPI interface to determine the supported input formats + * before creating the input surfaces. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] encodeGUID + * Encode GUID, corresponding to which the number of supported input formats + * is to be retrieved. 
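
Editor's note: the enumeration calls above all follow the same count-then-fill pattern (query the count, allocate an array, then fetch it). A minimal sketch, assuming a valid session pointer from NvEncOpenEncodeSessionEx(), that this compat header is included as "nvEncodeAPI.h", and a purely illustrative helper name:

    #include <stdint.h>
    #include <stdlib.h>
    #include "nvEncodeAPI.h"   /* assumed include path for this compat header */

    /* Illustrative helper: enumerate the codec GUIDs, then the profile GUID
     * count of the first codec, using the count-then-fill pattern above. */
    static NVENCSTATUS list_codecs_and_profiles(void *encoder)
    {
        uint32_t ncodecs = 0, got = 0;
        NVENCSTATUS st = NvEncGetEncodeGUIDCount(encoder, &ncodecs);
        if (st != NV_ENC_SUCCESS || !ncodecs)
            return st;

        GUID *codecs = malloc(ncodecs * sizeof(*codecs));
        if (!codecs)
            return NV_ENC_ERR_OUT_OF_MEMORY;
        st = NvEncGetEncodeGUIDs(encoder, codecs, ncodecs, &got);

        if (st == NV_ENC_SUCCESS && got) {
            uint32_t nprofiles = 0;
            st = NvEncGetEncodeProfileGUIDCount(encoder, codecs[0], &nprofiles);
            /* NvEncGetEncodeProfileGUIDs() is then called with an array of
             * nprofiles GUIDs, exactly like the codec enumeration above. */
        }
        free(codecs);
        return st;
    }
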
+ * \param [out] inputFmtCount + * Number of input formats supported for specified Encode GUID. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + */ +NVENCSTATUS NVENCAPI NvEncGetInputFormatCount (void* encoder, GUID encodeGUID, uint32_t* inputFmtCount); + + +// NvEncGetInputFormats +/** + * \brief Retrieves an array of supported Input formats + * + * Returns an array of supported input formats The client must use the input + * format to create input surface using ::NvEncCreateInputBuffer() API. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] encodeGUID + * Encode GUID, corresponding to which the number of supported input formats + * is to be retrieved. + *\param [in] inputFmtArraySize + * Size input format count array passed in \p inputFmts. + *\param [out] inputFmts + * Array of input formats supported for this Encode GUID. + *\param [out] inputFmtCount + * The number of valid input format types returned by the NvEncodeAPI + * interface in \p inputFmts array. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetInputFormats (void* encoder, GUID encodeGUID, NV_ENC_BUFFER_FORMAT* inputFmts, uint32_t inputFmtArraySize, uint32_t* inputFmtCount); + + +// NvEncGetEncodeCaps +/** + * \brief Retrieves the capability value for a specified encoder attribute. + * + * The function returns the capability value for a given encoder attribute. The + * client must validate the encodeGUID using ::NvEncGetEncodeGUIDs() API before + * calling this function. The encoder attribute being queried are enumerated in + * ::NV_ENC_CAPS_PARAM enum. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] encodeGUID + * Encode GUID, corresponding to which the capability attribute is to be retrieved. + * \param [in] capsParam + * Used to specify attribute being queried. Refer ::NV_ENC_CAPS_PARAM for more + * details. + * \param [out] capsVal + * The value corresponding to the capability attribute being queried. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + */ +NVENCSTATUS NVENCAPI NvEncGetEncodeCaps (void* encoder, GUID encodeGUID, NV_ENC_CAPS_PARAM* capsParam, int* capsVal); + + +// NvEncGetEncodePresetCount +/** + * \brief Retrieves the number of supported preset GUIDs. + * + * The function returns the number of preset GUIDs available for a given codec. + * The client must validate the codec guid using ::NvEncGetEncodeGUIDs() API + * before calling this function. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] encodeGUID + * Encode GUID, corresponding to which the number of supported presets is to + * be retrieved. + * \param [out] encodePresetGUIDCount + * Receives the number of supported preset GUIDs. 
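
Editor's note: a sketch of the two query styles documented here — input formats use the count-then-fill pattern, capabilities are queried one attribute at a time. The capsToQuery member, the NV_ENC_CAPS enum type and NV_ENC_CAPS_PARAM_VER are assumed from shipping NvEncodeAPI headers; they are not defined in this excerpt.

    #include <stdlib.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: list the input formats supported for one codec GUID. */
    static NVENCSTATUS list_input_formats(void *encoder, GUID codec)
    {
        uint32_t n = 0, got = 0;
        NVENCSTATUS st = NvEncGetInputFormatCount(encoder, codec, &n);
        if (st != NV_ENC_SUCCESS || !n)
            return st;

        NV_ENC_BUFFER_FORMAT *fmts = malloc(n * sizeof(*fmts));
        if (!fmts)
            return NV_ENC_ERR_OUT_OF_MEMORY;
        st = NvEncGetInputFormats(encoder, codec, fmts, n, &got);
        /* one of fmts[0..got-1] must be used when creating input buffers */
        free(fmts);
        return st;
    }

    /* Illustrative: query a single capability attribute; returns -1 on failure.
     * Assumes NV_ENC_CAPS_PARAM::capsToQuery and NV_ENC_CAPS_PARAM_VER. */
    static int query_cap(void *encoder, GUID codec, NV_ENC_CAPS cap)
    {
        NV_ENC_CAPS_PARAM p = { 0 };
        int val = 0;
        p.version     = NV_ENC_CAPS_PARAM_VER;
        p.capsToQuery = cap;
        return NvEncGetEncodeCaps(encoder, codec, &p, &val) == NV_ENC_SUCCESS ? val : -1;
    }
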
+ * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetEncodePresetCount (void* encoder, GUID encodeGUID, uint32_t* encodePresetGUIDCount); + + +// NvEncGetEncodePresetGUIDs +/** + * \brief Receives an array of supported encoder preset GUIDs. + * + * The function returns an array of encode preset guids available for a given codec. + * The client can directly use one of the preset guids based upon the use case + * or target device. The preset guid chosen can be directly used in + * NV_ENC_INITIALIZE_PARAMS::presetGUID parameter to ::NvEncEncodePicture() API. + * Alternately client can also use the preset guid to retrieve the encoding config + * parameters being used by NvEncodeAPI interface for that given preset, using + * ::NvEncGetEncodePresetConfig() API. It can then modify preset config parameters + * as per its use case and send it to NvEncodeAPI interface as part of + * NV_ENC_INITIALIZE_PARAMS::encodeConfig parameter for NvEncInitializeEncoder() + * API. + * + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] encodeGUID + * Encode GUID, corresponding to which the list of supported presets is to be + * retrieved. + * \param [in] guidArraySize + * Size of array of preset guids passed in \p preset GUIDs + * \param [out] presetGUIDs + * Array of supported Encode preset GUIDs from the NvEncodeAPI interface + * to client. + * \param [out] encodePresetGUIDCount + * Receives the number of preset GUIDs returned by the NvEncodeAPI + * interface. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetEncodePresetGUIDs (void* encoder, GUID encodeGUID, GUID* presetGUIDs, uint32_t guidArraySize, uint32_t* encodePresetGUIDCount); + + +// NvEncGetEncodePresetConfig +/** + * \brief Returns a preset config structure supported for given preset GUID. + * + * The function returns a preset config structure for a given preset guid. Before + * using this function the client must enumerate the preset guids available for + * a given codec. The preset config structure can be modified by the client depending + * upon its use case and can be then used to initialize the encoder using + * ::NvEncInitializeEncoder() API. The client can use this function only if it + * wants to modify the NvEncodeAPI preset configuration, otherwise it can + * directly use the preset guid. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] encodeGUID + * Encode GUID, corresponding to which the list of supported presets is to be + * retrieved. + * \param [in] presetGUID + * Preset GUID, corresponding to which the Encoding configurations is to be + * retrieved. + * \param [out] presetConfig + * The requested Preset Encoder Attribute set. Refer ::_NV_ENC_CONFIG for +* more details. 
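
Editor's note: a sketch of the preset enumeration just described. The returned GUID can be used directly as NV_ENC_INITIALIZE_PARAMS::presetGUID, or fed to NvEncGetEncodePresetConfig() as covered next; the helper name and session pointer are assumptions.

    #include <stdlib.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: fetch the preset GUIDs available for a codec. The caller
     * frees the returned array; NULL is returned on any failure. */
    static GUID *get_preset_guids(void *encoder, GUID codec, uint32_t *count)
    {
        uint32_t n = 0, got = 0;
        GUID *presets;

        if (NvEncGetEncodePresetCount(encoder, codec, &n) != NV_ENC_SUCCESS || !n)
            return NULL;
        presets = malloc(n * sizeof(*presets));
        if (!presets)
            return NULL;
        if (NvEncGetEncodePresetGUIDs(encoder, codec, presets, n, &got) != NV_ENC_SUCCESS) {
            free(presets);
            return NULL;
        }
        *count = got;
        return presets;
    }
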
+ * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetEncodePresetConfig (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_PRESET_CONFIG* presetConfig); + +// NvEncInitializeEncoder +/** + * \brief Initialize the encoder. + * + * This API must be used to initialize the encoder. The initialization parameter + * is passed using \p *createEncodeParams The client must send the following + * fields of the _NV_ENC_INITIALIZE_PARAMS structure with a valid value. + * - NV_ENC_INITIALIZE_PARAMS::encodeGUID + * - NV_ENC_INITIALIZE_PARAMS::encodeWidth + * - NV_ENC_INITIALIZE_PARAMS::encodeHeight + * + * The client can pass a preset guid directly to the NvEncodeAPI interface using + * NV_ENC_INITIALIZE_PARAMS::presetGUID field. If the client doesn't pass + * NV_ENC_INITIALIZE_PARAMS::encodeConfig structure, the codec specific parameters + * will be selected based on the preset guid. The preset guid must have been + * validated by the client using ::NvEncGetEncodePresetGUIDs() API. + * If the client passes a custom ::_NV_ENC_CONFIG structure through + * NV_ENC_INITIALIZE_PARAMS::encodeConfig , it will override the codec specific parameters + * based on the preset guid. It is recommended that even if the client passes a custom config, + * it should also send a preset guid. In this case, the preset guid passed by the client + * will not override any of the custom config parameters programmed by the client, + * it is only used as a hint by the NvEncodeAPI interface to determine certain encoder parameters + * which are not exposed to the client. + * + * There are two modes of operation for the encoder namely: + * - Asynchronous mode + * - Synchronous mode + * + * The client can select asynchronous or synchronous mode by setting the \p + * enableEncodeAsync field in ::_NV_ENC_INITIALIZE_PARAMS to 1 or 0 respectively. + *\par Asynchronous mode of operation: + * The Asynchronous mode can be enabled by setting NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 1. + * The client operating in asynchronous mode must allocate completion event object + * for each output buffer and pass the completion event object in the + * ::NvEncEncodePicture() API. The client can create another thread and wait on + * the event object to be signalled by NvEncodeAPI interface on completion of the + * encoding process for the output frame. This should unblock the main thread from + * submitting work to the encoder. When the event is signalled the client can call + * NvEncodeAPI interfaces to copy the bitstream data using ::NvEncLockBitstream() + * API. This is the preferred mode of operation. + * + * NOTE: Asynchronous mode is not supported on Linux. + * + *\par Synchronous mode of operation: + * The client can select synchronous mode by setting NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 0. + * The client working in synchronous mode can work in a single threaded or multi + * threaded mode. The client need not allocate any event objects. The client can + * only lock the bitstream data after NvEncodeAPI interface has returned + * ::NV_ENC_SUCCESS from encode picture. The NvEncodeAPI interface can return + * ::NV_ENC_ERR_NEED_MORE_INPUT error code from ::NvEncEncodePicture() API. 
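
Editor's note: a sketch of fetching a preset configuration so individual fields can be overridden before initialization. NV_ENC_PRESET_CONFIG_VER, NV_ENC_CONFIG_VER and the presetCfg member are assumed from shipping NvEncodeAPI headers; only the function itself appears in this excerpt.

    #include <string.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: retrieve the encode config behind a preset GUID so the
     * caller can tweak it and pass it via NV_ENC_INITIALIZE_PARAMS::encodeConfig. */
    static NVENCSTATUS fetch_preset_config(void *encoder, GUID codec, GUID preset,
                                           NV_ENC_PRESET_CONFIG *out)
    {
        memset(out, 0, sizeof(*out));
        out->version           = NV_ENC_PRESET_CONFIG_VER;   /* assumed macro  */
        out->presetCfg.version = NV_ENC_CONFIG_VER;          /* assumed member */
        return NvEncGetEncodePresetConfig(encoder, codec, preset, out);
    }
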
The + * client must not lock the output buffer in such case but should send the next + * frame for encoding. The client must keep on calling ::NvEncEncodePicture() API + * until it returns ::NV_ENC_SUCCESS. \n + * The client must always lock the bitstream data in order in which it has submitted. + * This is true for both asynchronous and synchronous mode. + * + *\par Picture type decision: + * If the client is taking the picture type decision and it must disable the picture + * type decision module in NvEncodeAPI by setting NV_ENC_INITIALIZE_PARAMS::enablePTD + * to 0. In this case the client is required to send the picture in encoding + * order to NvEncodeAPI by doing the re-ordering for B frames. \n + * If the client doesn't want to take the picture type decision it can enable + * picture type decision module in the NvEncodeAPI interface by setting + * NV_ENC_INITIALIZE_PARAMS::enablePTD to 1 and send the input pictures in display + * order. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] createEncodeParams + * Refer ::_NV_ENC_INITIALIZE_PARAMS for details. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncInitializeEncoder (void* encoder, NV_ENC_INITIALIZE_PARAMS* createEncodeParams); + + +// NvEncCreateInputBuffer +/** + * \brief Allocates Input buffer. + * + * This function is used to allocate an input buffer. The client must enumerate + * the input buffer format before allocating the input buffer resources. The + * NV_ENC_INPUT_PTR returned by the NvEncodeAPI interface in the + * NV_ENC_CREATE_INPUT_BUFFER::inputBuffer field can be directly used in + * ::NvEncEncodePicture() API. The number of input buffers to be allocated by the + * client must be at least 4 more than the number of B frames being used for encoding. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] createInputBufferParams + * Pointer to the ::NV_ENC_CREATE_INPUT_BUFFER structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncCreateInputBuffer (void* encoder, NV_ENC_CREATE_INPUT_BUFFER* createInputBufferParams); + + +// NvEncDestroyInputBuffer +/** + * \brief Release an input buffers. + * + * This function is used to free an input buffer. If the client has allocated + * any input buffer using ::NvEncCreateInputBuffer() API, it must free those + * input buffers by calling this function. The client must release the input + * buffers before destroying the encoder using ::NvEncDestroyEncoder() API. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] inputBuffer + * Pointer to the input buffer to be released. 
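
Editor's note: a minimal initialization sketch covering only the fields this documentation calls out as mandatory (encodeGUID, encodeWidth, encodeHeight) plus a preset GUID, running in synchronous mode with picture-type decision left to the encoder. Remaining fields are left zeroed for brevity; a real client also fills in frame-rate and related details.

    #include <string.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: synchronous-mode initialization with encoder-side PTD. */
    static NVENCSTATUS init_encoder(void *encoder, GUID codec, GUID preset,
                                    uint32_t width, uint32_t height)
    {
        NV_ENC_INITIALIZE_PARAMS params;

        memset(&params, 0, sizeof(params));
        params.version           = NV_ENC_INITIALIZE_PARAMS_VER;
        params.encodeGUID        = codec;
        params.presetGUID        = preset;
        params.encodeWidth       = width;
        params.encodeHeight      = height;
        params.enableEncodeAsync = 0;   /* synchronous mode (asynchronous is not supported on Linux) */
        params.enablePTD         = 1;   /* let the encoder pick picture types */
        return NvEncInitializeEncoder(encoder, &params);
    }
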
+ * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncDestroyInputBuffer (void* encoder, NV_ENC_INPUT_PTR inputBuffer); + + +// NvEncCreateBitstreamBuffer +/** + * \brief Allocates an output bitstream buffer + * + * This function is used to allocate an output bitstream buffer and returns a + * NV_ENC_OUTPUT_PTR to bitstream buffer to the client in the + * NV_ENC_CREATE_BITSTREAM_BUFFER::bitstreamBuffer field. + * The client can only call this function after the encoder session has been + * initialized using ::NvEncInitializeEncoder() API. The minimum number of output + * buffers allocated by the client must be at least 4 more than the number of B + * B frames being used for encoding. The client can only access the output + * bitsteam data by locking the \p bitstreamBuffer using the ::NvEncLockBitstream() + * function. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] createBitstreamBufferParams + * Pointer ::NV_ENC_CREATE_BITSTREAM_BUFFER for details. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncCreateBitstreamBuffer (void* encoder, NV_ENC_CREATE_BITSTREAM_BUFFER* createBitstreamBufferParams); + + +// NvEncDestroyBitstreamBuffer +/** + * \brief Release a bitstream buffer. + * + * This function is used to release the output bitstream buffer allocated using + * the ::NvEncCreateBitstreamBuffer() function. The client must release the output + * bitstreamBuffer using this function before destroying the encoder session. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] bitstreamBuffer + * Pointer to the bitstream buffer being released. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncDestroyBitstreamBuffer (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer); + +// NvEncEncodePicture +/** + * \brief Submit an input picture for encoding. + * + * This function is used to submit an input picture buffer for encoding. The + * encoding parameters are passed using \p *encodePicParams which is a pointer + * to the ::_NV_ENC_PIC_PARAMS structure. + * + * If the client has set NV_ENC_INITIALIZE_PARAMS::enablePTD to 0, then it must + * send a valid value for the following fields. 
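
Editor's note: the sizing rule above (at least 4 more buffers than the number of B frames) applies to both input and output buffers. A sketch of allocating one pair; the width/height/bufferFmt members of NV_ENC_CREATE_INPUT_BUFFER, the two _VER macros, and the layout of NV_ENC_CREATE_BITSTREAM_BUFFER beyond its bitstreamBuffer field are assumed from shipping headers.

    #include <string.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: allocate one input/output buffer pair; a client allocates
     * at least (number of B frames + 4) of each. */
    static NVENCSTATUS alloc_buffer_pair(void *encoder,
                                         uint32_t width, uint32_t height,
                                         NV_ENC_BUFFER_FORMAT fmt,
                                         NV_ENC_INPUT_PTR *in,
                                         NV_ENC_OUTPUT_PTR *out)
    {
        NV_ENC_CREATE_INPUT_BUFFER     ib;
        NV_ENC_CREATE_BITSTREAM_BUFFER ob;
        NVENCSTATUS st;

        memset(&ib, 0, sizeof(ib));
        ib.version   = NV_ENC_CREATE_INPUT_BUFFER_VER;   /* assumed macro   */
        ib.width     = width;                            /* assumed members */
        ib.height    = height;
        ib.bufferFmt = fmt;
        st = NvEncCreateInputBuffer(encoder, &ib);
        if (st != NV_ENC_SUCCESS)
            return st;
        *in = ib.inputBuffer;

        memset(&ob, 0, sizeof(ob));
        ob.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER; /* assumed macro */
        st = NvEncCreateBitstreamBuffer(encoder, &ob);
        if (st != NV_ENC_SUCCESS) {
            NvEncDestroyInputBuffer(encoder, ib.inputBuffer);
            return st;
        }
        *out = ob.bitstreamBuffer;
        return NV_ENC_SUCCESS;
    }
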
+ * - NV_ENC_PIC_PARAMS::pictureType + * - NV_ENC_PIC_PARAMS_H264::displayPOCSyntax (H264 only) + * - NV_ENC_PIC_PARAMS_H264::frameNumSyntax(H264 only) + * - NV_ENC_PIC_PARAMS_H264::refPicFlag(H264 only) + * + * + *\par Asynchronous Encoding + * If the client has enabled asynchronous mode of encoding by setting + * NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 1 in the ::NvEncInitializeEncoder() + * API ,then the client must send a valid NV_ENC_PIC_PARAMS::completionEvent. + * Incase of asynchronous mode of operation, client can queue the ::NvEncEncodePicture() + * API commands from the main thread and then queue output buffers to be processed + * to a secondary worker thread. Before the locking the output buffers in the + * secondary thread , the client must wait on NV_ENC_PIC_PARAMS::completionEvent + * it has queued in ::NvEncEncodePicture() API call. The client must always process + * completion event and the output buffer in the same order in which they have been + * submitted for encoding. The NvEncodeAPI interface is responsible for any + * re-ordering required for B frames and will always ensure that encoded bitstream + * data is written in the same order in which output buffer is submitted. + *\code + The below example shows how asynchronous encoding in case of 1 B frames + ------------------------------------------------------------------------ + Suppose the client allocated 4 input buffers(I1,I2..), 4 output buffers(O1,O2..) + and 4 completion events(E1, E2, ...). The NvEncodeAPI interface will need to + keep a copy of the input buffers for re-ordering and it allocates following + internal buffers (NvI1, NvI2...). These internal buffers are managed by NvEncodeAPI + and the client is not responsible for the allocating or freeing the memory of + the internal buffers. + + a) The client main thread will queue the following encode frame calls. + Note the picture type is unknown to the client, the decision is being taken by + NvEncodeAPI interface. The client should pass ::_NV_ENC_PIC_PARAMS parameter + consisting of allocated input buffer, output buffer and output events in successive + ::NvEncEncodePicture() API calls along with other required encode picture params. + For example: + 1st EncodePicture parameters - (I1, O1, E1) + 2nd EncodePicture parameters - (I2, O2, E2) + 3rd EncodePicture parameters - (I3, O3, E3) + + b) NvEncodeAPI SW will receive the following encode Commands from the client. + The left side shows input from client in the form (Input buffer, Output Buffer, + Output Event). The right hand side shows a possible picture type decision take by + the NvEncodeAPI interface. + (I1, O1, E1) ---P1 Frame + (I2, O2, E2) ---B2 Frame + (I3, O3, E3) ---P3 Frame + + c) NvEncodeAPI interface will make a copy of the input buffers to its internal + buffersfor re-ordering. These copies are done as part of nvEncEncodePicture + function call from the client and NvEncodeAPI interface is responsible for + synchronization of copy operation with the actual encoding operation. + I1 --> NvI1 + I2 --> NvI2 + I3 --> NvI3 + + d) After returning from ::NvEncEncodePicture() call , the client must queue the output + bitstream processing work to the secondary thread. The output bitstream processing + for asynchronous mode consist of first waiting on completion event(E1, E2..) + and then locking the output bitstream buffer(O1, O2..) for reading the encoded + data. 
The work queued to the secondary thread by the client is in the following order + (I1, O1, E1) + (I2, O2, E2) + (I3, O3, E3) + Note they are in the same order in which client calls ::NvEncEncodePicture() API + in \p step a). + + e) NvEncodeAPI interface will do the re-ordering such that Encoder HW will receive + the following encode commands: + (NvI1, O1, E1) ---P1 Frame + (NvI3, O2, E2) ---P3 Frame + (NvI2, O3, E3) ---B2 frame + + f) After the encoding operations are completed, the events will be signalled + by NvEncodeAPI interface in the following order : + (O1, E1) ---P1 Frame ,output bitstream copied to O1 and event E1 signalled. + (O2, E2) ---P3 Frame ,output bitstream copied to O2 and event E2 signalled. + (O3, E3) ---B2 Frame ,output bitstream copied to O3 and event E3 signalled. + + g) The client must lock the bitstream data using ::NvEncLockBitstream() API in + the order O1,O2,O3 to read the encoded data, after waiting for the events + to be signalled in the same order i.e E1, E2 and E3.The output processing is + done in the secondary thread in the following order: + Waits on E1, copies encoded bitstream from O1 + Waits on E2, copies encoded bitstream from O2 + Waits on E3, copies encoded bitstream from O3 + + -Note the client will receive the events signalling and output buffer in the + same order in which they have submitted for encoding. + -Note the LockBitstream will have picture type field which will notify the + output picture type to the clients. + -Note the input, output buffer and the output completion event are free to be + reused once NvEncodeAPI interfaced has signalled the event and the client has + copied the data from the output buffer. + + * \endcode + * + *\par Synchronous Encoding + * The client can enable synchronous mode of encoding by setting + * NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 0 in ::NvEncInitializeEncoder() API. + * The NvEncodeAPI interface may return ::NV_ENC_ERR_NEED_MORE_INPUT error code for + * some ::NvEncEncodePicture() API calls when NV_ENC_INITIALIZE_PARAMS::enablePTD + * is set to 1, but the client must not treat it as a fatal error. The NvEncodeAPI + * interface might not be able to submit an input picture buffer for encoding + * immediately due to re-ordering for B frames. The NvEncodeAPI interface cannot + * submit the input picture which is decided to be encoded as B frame as it waits + * for backward reference from temporally subsequent frames. This input picture + * is buffered internally and waits for more input picture to arrive. The client + * must not call ::NvEncLockBitstream() API on the output buffers whose + * ::NvEncEncodePicture() API returns ::NV_ENC_ERR_NEED_MORE_INPUT. The client must + * wait for the NvEncodeAPI interface to return ::NV_ENC_SUCCESS before locking the + * output bitstreams to read the encoded bitstream data. The following example + * explains the scenario with synchronous encoding with 2 B frames. + *\code + The below example shows how synchronous encoding works in case of 1 B frames + ----------------------------------------------------------------------------- + Suppose the client allocated 4 input buffers(I1,I2..), 4 output buffers(O1,O2..) + and 4 completion events(E1, E2, ...). The NvEncodeAPI interface will need to + keep a copy of the input buffers for re-ordering and it allocates following + internal buffers (NvI1, NvI2...). These internal buffers are managed by NvEncodeAPI + and the client is not responsible for the allocating or freeing the memory of + the internal buffers. 
+ + The client calls ::NvEncEncodePicture() API with input buffer I1 and output buffer O1. + The NvEncodeAPI decides to encode I1 as P frame and submits it to encoder + HW and returns ::NV_ENC_SUCCESS. + The client can now read the encoded data by locking the output O1 by calling + NvEncLockBitstream API. + + The client calls ::NvEncEncodePicture() API with input buffer I2 and output buffer O2. + The NvEncodeAPI decides to encode I2 as B frame and buffers I2 by copying it + to internal buffer and returns ::NV_ENC_ERR_NEED_MORE_INPUT. + The error is not fatal and it notifies client that it cannot read the encoded + data by locking the output O2 by calling ::NvEncLockBitstream() API without submitting + more work to the NvEncodeAPI interface. + + The client calls ::NvEncEncodePicture() with input buffer I3 and output buffer O3. + The NvEncodeAPI decides to encode I3 as P frame and it first submits I3 for + encoding which will be used as backward reference frame for I2. + The NvEncodeAPI then submits I2 for encoding and returns ::NV_ENC_SUCESS. Both + the submission are part of the same ::NvEncEncodePicture() function call. + The client can now read the encoded data for both the frames by locking the output + O2 followed by O3 ,by calling ::NvEncLockBitstream() API. + + The client must always lock the output in the same order in which it has submitted + to receive the encoded bitstream in correct encoding order. + + * \endcode + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] encodePicParams + * Pointer to the ::_NV_ENC_PIC_PARAMS structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_ENCODER_BUSY \n + * ::NV_ENC_ERR_NEED_MORE_INPUT \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncEncodePicture (void* encoder, NV_ENC_PIC_PARAMS* encodePicParams); + + +// NvEncLockBitstream +/** + * \brief Lock output bitstream buffer + * + * This function is used to lock the bitstream buffer to read the encoded data. + * The client can only access the encoded data by calling this function. + * The pointer to client accessible encoded data is returned in the + * NV_ENC_LOCK_BITSTREAM::bitstreamBufferPtr field. The size of the encoded data + * in the output buffer is returned in the NV_ENC_LOCK_BITSTREAM::bitstreamSizeInBytes + * The NvEncodeAPI interface also returns the output picture type and picture structure + * of the encoded frame in NV_ENC_LOCK_BITSTREAM::pictureType and + * NV_ENC_LOCK_BITSTREAM::pictureStruct fields respectively. If the client has + * set NV_ENC_LOCK_BITSTREAM::doNotWait to 1, the function might return + * ::NV_ENC_ERR_LOCK_BUSY if client is operating in synchronous mode. This is not + * a fatal failure if NV_ENC_LOCK_BITSTREAM::doNotWait is set to 1. In the above case the client can + * retry the function after few milliseconds. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] lockBitstreamBufferParams + * Pointer to the ::_NV_ENC_LOCK_BITSTREAM structure. 
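
Editor's note: a sketch of the synchronous submission described above: ::NV_ENC_ERR_NEED_MORE_INPUT is not fatal, and the output buffer may only be locked once a later call returns ::NV_ENC_SUCCESS. The outputBitstream member name and the NV_ENC_PIC_STRUCT_FRAME enumerator are assumed from shipping headers.

    #include <string.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: submit one frame in synchronous mode with enablePTD = 1.
     * Returns NV_ENC_SUCCESS when output for one or more frames is ready,
     * NV_ENC_ERR_NEED_MORE_INPUT when the encoder buffered the picture. */
    static NVENCSTATUS submit_frame(void *encoder,
                                    NV_ENC_INPUT_PTR in, NV_ENC_OUTPUT_PTR out,
                                    NV_ENC_BUFFER_FORMAT fmt)
    {
        NV_ENC_PIC_PARAMS pic;

        memset(&pic, 0, sizeof(pic));
        pic.version         = NV_ENC_PIC_PARAMS_VER;
        pic.inputBuffer     = in;
        pic.outputBitstream = out;                       /* assumed member name */
        pic.bufferFmt       = fmt;
        pic.pictureStruct   = NV_ENC_PIC_STRUCT_FRAME;   /* assumed enumerator  */
        return NvEncEncodePicture(encoder, &pic);
    }
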
+ * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_LOCK_BUSY \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncLockBitstream (void* encoder, NV_ENC_LOCK_BITSTREAM* lockBitstreamBufferParams); + + +// NvEncUnlockBitstream +/** + * \brief Unlock the output bitstream buffer + * + * This function is used to unlock the output bitstream buffer after the client + * has read the encoded data from output buffer. The client must call this function + * to unlock the output buffer which it has previously locked using ::NvEncLockBitstream() + * function. Using a locked bitstream buffer in ::NvEncEncodePicture() API will cause + * the function to fail. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] bitstreamBuffer + * bitstream buffer pointer being unlocked + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncUnlockBitstream (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer); + + +// NvLockInputBuffer +/** + * \brief Locks an input buffer + * + * This function is used to lock the input buffer to load the uncompressed YUV + * pixel data into input buffer memory. The client must pass the NV_ENC_INPUT_PTR + * it had previously allocated using ::NvEncCreateInputBuffer()in the + * NV_ENC_LOCK_INPUT_BUFFER::inputBuffer field. + * The NvEncodeAPI interface returns pointer to client accessible input buffer + * memory in NV_ENC_LOCK_INPUT_BUFFER::bufferDataPtr field. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] lockInputBufferParams + * Pointer to the ::_NV_ENC_LOCK_INPUT_BUFFER structure + * + * \return + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_LOCK_BUSY \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncLockInputBuffer (void* encoder, NV_ENC_LOCK_INPUT_BUFFER* lockInputBufferParams); + + +// NvUnlockInputBuffer +/** + * \brief Unlocks the input buffer + * + * This function is used to unlock the input buffer memory previously locked for + * uploading YUV pixel data. The input buffer must be unlocked before being used + * again for encoding, otherwise NvEncodeAPI will fail the ::NvEncEncodePicture() + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] inputBuffer + * Pointer to the input buffer that is being unlocked. 
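
Editor's note: two sketches of the locking calls documented here — draining an encoded frame from a bitstream buffer, and uploading raw pixels into a CPU input buffer. The write_output() callback is a stand-in for whatever the client does with the data; the helper names are illustrative.

    #include <stdint.h>
    #include <string.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: copy one encoded frame out of a bitstream buffer. */
    static NVENCSTATUS drain_output(void *encoder, NV_ENC_OUTPUT_PTR out,
                                    void (*write_output)(const void *, uint32_t))
    {
        NV_ENC_LOCK_BITSTREAM lock;
        NVENCSTATUS st;

        memset(&lock, 0, sizeof(lock));
        lock.version         = NV_ENC_LOCK_BITSTREAM_VER;
        lock.outputBitstream = out;
        st = NvEncLockBitstream(encoder, &lock);
        if (st != NV_ENC_SUCCESS)
            return st;
        write_output(lock.bitstreamBufferPtr, lock.bitstreamSizeInBytes);
        return NvEncUnlockBitstream(encoder, out);
    }

    /* Illustrative: upload a tightly packed luma plane into a locked input
     * buffer, honouring the pitch returned by the driver. Chroma planes for
     * planar formats follow the same pattern. */
    static NVENCSTATUS upload_luma(void *encoder, NV_ENC_INPUT_PTR in,
                                   const uint8_t *src, uint32_t width, uint32_t height)
    {
        NV_ENC_LOCK_INPUT_BUFFER lock;
        NVENCSTATUS st;
        uint32_t y;

        memset(&lock, 0, sizeof(lock));
        lock.version     = NV_ENC_LOCK_INPUT_BUFFER_VER;
        lock.inputBuffer = in;
        st = NvEncLockInputBuffer(encoder, &lock);
        if (st != NV_ENC_SUCCESS)
            return st;
        for (y = 0; y < height; y++)
            memcpy((uint8_t *)lock.bufferDataPtr + y * lock.pitch, src + y * width, width);
        return NvEncUnlockInputBuffer(encoder, in);
    }
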
+ * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + * + */ +NVENCSTATUS NVENCAPI NvEncUnlockInputBuffer (void* encoder, NV_ENC_INPUT_PTR inputBuffer); + + +// NvEncGetEncodeStats +/** + * \brief Get encoding statistics. + * + * This function is used to retrieve the encoding statistics. + * This API is not supported when encode device type is CUDA. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] encodeStats + * Pointer to the ::_NV_ENC_STAT structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetEncodeStats (void* encoder, NV_ENC_STAT* encodeStats); + + +// NvEncGetSequenceParams +/** + * \brief Get encoded sequence and picture header. + * + * This function can be used to retrieve the sequence and picture header out of + * band. The client must call this function only after the encoder has been + * initialized using ::NvEncInitializeEncoder() function. The client must + * allocate the memory where the NvEncodeAPI interface can copy the bitstream + * header and pass the pointer to the memory in NV_ENC_SEQUENCE_PARAM_PAYLOAD::spsppsBuffer. + * The size of buffer is passed in the field NV_ENC_SEQUENCE_PARAM_PAYLOAD::inBufferSize. + * The NvEncodeAPI interface will copy the bitstream header payload and returns + * the actual size of the bitstream header in the field + * NV_ENC_SEQUENCE_PARAM_PAYLOAD::outSPSPPSPayloadSize. + * The client must call ::NvEncGetSequenceParams() function from the same thread which is + * being used to call ::NvEncEncodePicture() function. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] sequenceParamPayload + * Pointer to the ::_NV_ENC_SEQUENCE_PARAM_PAYLOAD structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetSequenceParams (void* encoder, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload); + + +// NvEncRegisterAsyncEvent +/** + * \brief Register event for notification to encoding completion. + * + * This function is used to register the completion event with NvEncodeAPI + * interface. The event is required when the client has configured the encoder to + * work in asynchronous mode. In this mode the client needs to send a completion + * event with every output buffer. The NvEncodeAPI interface will signal the + * completion of the encoding process using this event. Only after the event is + * signalled the client can get the encoded data using ::NvEncLockBitstream() function. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. 
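
Editor's note: a sketch of the out-of-band header retrieval described above. The caller provides the destination buffer and receives the actual header size; all struct members used here are defined earlier in this excerpt.

    #include <string.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: fetch the sequence/picture header bytes out of band. */
    static NVENCSTATUS get_headers(void *encoder, void *buf, uint32_t buf_size,
                                   uint32_t *payload_size)
    {
        NV_ENC_SEQUENCE_PARAM_PAYLOAD payload;

        memset(&payload, 0, sizeof(payload));
        payload.version              = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER;
        payload.inBufferSize         = buf_size;
        payload.spsppsBuffer         = buf;
        payload.outSPSPPSPayloadSize = payload_size;
        return NvEncGetSequenceParams(encoder, &payload);
    }
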
+ * \param [in] eventParams + * Pointer to the ::_NV_ENC_EVENT_PARAMS structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncRegisterAsyncEvent (void* encoder, NV_ENC_EVENT_PARAMS* eventParams); + + +// NvEncUnregisterAsyncEvent +/** + * \brief Unregister completion event. + * + * This function is used to unregister completion event which has been previously + * registered using ::NvEncRegisterAsyncEvent() function. The client must unregister + * all events before destroying the encoder using ::NvEncDestroyEncoder() function. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] eventParams + * Pointer to the ::_NV_ENC_EVENT_PARAMS structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncUnregisterAsyncEvent (void* encoder, NV_ENC_EVENT_PARAMS* eventParams); + + +// NvEncMapInputResource +/** + * \brief Map an externally created input resource pointer for encoding. + * + * Maps an externally allocated input resource [using and returns a NV_ENC_INPUT_PTR + * which can be used for encoding in the ::NvEncEncodePicture() function. The + * mapped resource is returned in the field NV_ENC_MAP_INPUT_RESOURCE::outputResourcePtr. + * The NvEncodeAPI interface also returns the buffer format of the mapped resource + * in the field NV_ENC_MAP_INPUT_RESOURCE::outbufferFmt. + * This function provides synchronization guarantee that any direct3d or cuda + * work submitted on the input buffer is completed before the buffer is used for encoding. + * The client should not access any input buffer while they are mapped by the encoder. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] mapInputResParams + * Pointer to the ::_NV_ENC_MAP_INPUT_RESOURCE structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n + * ::NV_ENC_ERR_MAP_FAILED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncMapInputResource (void* encoder, NV_ENC_MAP_INPUT_RESOURCE* mapInputResParams); + + +// NvEncUnmapInputResource +/** + * \brief UnMaps a NV_ENC_INPUT_PTR which was mapped for encoding + * + * + * UnMaps an input buffer which was previously mapped using ::NvEncMapInputResource() + * API. The mapping created using ::NvEncMapInputResource() should be invalidated + * using this API before the external resource is destroyed by the client. The client + * must unmap the buffer after ::NvEncLockBitstream() API returns succuessfully for encode + * work submitted using the mapped input buffer. 
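
Editor's note: two sketches for the asynchronous-mode and external-resource paths documented here. The completion event handle is created by the client (for example with CreateEvent() on Windows, where asynchronous mode is available), and the registered resource handle comes from NvEncRegisterResource().

    #include <string.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: register a client-created completion event (async mode). */
    static NVENCSTATUS register_event(void *encoder, void *event_handle)
    {
        NV_ENC_EVENT_PARAMS ev;

        memset(&ev, 0, sizeof(ev));
        ev.version         = NV_ENC_EVENT_PARAMS_VER;
        ev.completionEvent = event_handle;
        return NvEncRegisterAsyncEvent(encoder, &ev);
    }

    /* Illustrative: map a registered external resource and obtain the
     * NV_ENC_INPUT_PTR / buffer format to use in NV_ENC_PIC_PARAMS. */
    static NVENCSTATUS map_resource(void *encoder, NV_ENC_REGISTERED_PTR reg,
                                    NV_ENC_INPUT_PTR *mapped,
                                    NV_ENC_BUFFER_FORMAT *fmt)
    {
        NV_ENC_MAP_INPUT_RESOURCE map;
        NVENCSTATUS st;

        memset(&map, 0, sizeof(map));
        map.version            = NV_ENC_MAP_INPUT_RESOURCE_VER;
        map.registeredResource = reg;
        st = NvEncMapInputResource(encoder, &map);
        if (st != NV_ENC_SUCCESS)
            return st;
        *mapped = map.mappedResource;
        *fmt    = map.mappedBufferFmt;
        return NV_ENC_SUCCESS;
    }
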
+ * + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] mappedInputBuffer + * Pointer to the NV_ENC_INPUT_PTR + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n + * ::NV_ENC_ERR_RESOURCE_NOT_MAPPED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncUnmapInputResource (void* encoder, NV_ENC_INPUT_PTR mappedInputBuffer); + +// NvEncDestroyEncoder +/** + * \brief Destroy Encoding Session + * + * Destroys the encoder session previously created using ::NvEncOpenEncodeSession() + * function. The client must flush the encoder before freeing any resources. In order + * to flush the encoder the client must pass a NULL encode picture packet and either + * wait for the ::NvEncEncodePicture() function to return in synchronous mode or wait + * for the flush event to be signaled by the encoder in asynchronous mode. + * The client must free all the input and output resources created using the + * NvEncodeAPI interface before destroying the encoder. If the client is operating + * in asynchronous mode, it must also unregister the completion events previously + * registered. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncDestroyEncoder (void* encoder); + +// NvEncInvalidateRefFrames +/** + * \brief Invalidate reference frames + * + * Invalidates reference frame based on the time stamp provided by the client. + * The encoder marks any reference frames or any frames which have been reconstructed + * using the corrupt frame as invalid for motion estimation and uses older reference + * frames for motion estimation. The encoded forces the current frame to be encoded + * as an intra frame if no reference frames are left after invalidation process. + * This is useful for low latency application for error resiliency. The client + * is recommended to set NV_ENC_CONFIG_H264::maxNumRefFrames to a large value so + * that encoder can keep a backup of older reference frames in the DPB and can use them + * for motion estimation when the newer reference frames have been invalidated. + * This API can be called multiple times. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] invalidRefFrameTimeStamp + * Timestamp of the invalid reference frames which needs to be invalidated. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncInvalidateRefFrames(void* encoder, uint64_t invalidRefFrameTimeStamp); + +// NvEncOpenEncodeSessionEx +/** + * \brief Opens an encoding session. + * + * Opens an encoding session and returns a pointer to the encoder interface in + * the \p **encoder parameter. 
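
Editor's note: a sketch of the teardown ordering required above — flush, release every input/output buffer (and, in asynchronous mode, unregister completion events), then destroy the session. The flush itself is elided here; how the "NULL encode picture packet" is expressed depends on the full header and is not shown in this excerpt.

    #include "nvEncodeAPI.h"

    /* Illustrative: release n buffer pairs and close the session. Assumes the
     * encoder has already been flushed and, in async mode, that all completion
     * events were unregistered with NvEncUnregisterAsyncEvent(). */
    static void teardown(void *encoder,
                         NV_ENC_INPUT_PTR *in, NV_ENC_OUTPUT_PTR *out, unsigned n)
    {
        unsigned i;

        for (i = 0; i < n; i++) {
            NvEncDestroyInputBuffer(encoder, in[i]);
            NvEncDestroyBitstreamBuffer(encoder, out[i]);
        }
        NvEncDestroyEncoder(encoder);
    }
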
The client should start encoding process by calling + * this API first. + * The client must pass a pointer to IDirect3DDevice9/CUDA interface in the \p *device parameter. + * If the creation of encoder session fails, the client must call ::NvEncDestroyEncoder API + * before exiting. + * + * \param [in] openSessionExParams + * Pointer to a ::NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS structure. + * \param [out] encoder + * Encode Session pointer to the NvEncodeAPI interface. + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_NO_ENCODE_DEVICE \n + * ::NV_ENC_ERR_UNSUPPORTED_DEVICE \n + * ::NV_ENC_ERR_INVALID_DEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncOpenEncodeSessionEx (NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS *openSessionExParams, void** encoder); + +// NvEncRegisterResource +/** + * \brief Registers a resource with the Nvidia Video Encoder Interface. + * + * Registers a resource with the Nvidia Video Encoder Interface for book keeping. + * The client is expected to pass the registered resource handle as well, while calling ::NvEncMapInputResource API. + * This API is not implemented for the DirectX Interface. + * DirectX based clients need not change their implementation. + * + * \param [in] encoder + * Pointer to the NVEncodeAPI interface. + * + * \param [in] registerResParams + * Pointer to a ::_NV_ENC_REGISTER_RESOURCE structure + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_RESOURCE_REGISTER_FAILED \n + * ::NV_ENC_ERR_GENERIC \n + * ::NV_ENC_ERR_UNIMPLEMENTED \n + * + */ +NVENCSTATUS NVENCAPI NvEncRegisterResource (void* encoder, NV_ENC_REGISTER_RESOURCE* registerResParams); + +// NvEncUnregisterResource +/** + * \brief Unregisters a resource previously registered with the Nvidia Video Encoder Interface. + * + * Unregisters a resource previously registered with the Nvidia Video Encoder Interface. + * The client is expected to unregister any resource that it has registered with the + * Nvidia Video Encoder Interface before destroying the resource. + * This API is not implemented for the DirectX Interface. + * DirectX based clients need not change their implementation. + * + * \param [in] encoder + * Pointer to the NVEncodeAPI interface. + * + * \param [in] registeredResource + * The registered resource pointer that was returned in ::NvEncRegisterResource. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n + * ::NV_ENC_ERR_GENERIC \n + * ::NV_ENC_ERR_UNIMPLEMENTED \n + * + */ +NVENCSTATUS NVENCAPI NvEncUnregisterResource (void* encoder, NV_ENC_REGISTERED_PTR registeredResource); + +// NvEncReconfigureEncoder +/** + * \brief Reconfigure an existing encoding session. + * + * Reconfigure an existing encoding session. + * The client should call this API to change/reconfigure the parameter passed during + * NvEncInitializeEncoder API call. 
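
Editor's note: two sketches for session creation and resource registration as documented here. The device pointer and device type are whatever CUDA or DirectX device the client already owns; error handling is reduced to returning the status, and the helper names are illustrative.

    #include <string.h>
    #include "nvEncodeAPI.h"

    /* Illustrative: open an encode session on an existing client device. */
    static NVENCSTATUS open_session(void *device, NV_ENC_DEVICE_TYPE type, void **encoder)
    {
        NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params;

        memset(&params, 0, sizeof(params));
        params.version    = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
        params.device     = device;
        params.deviceType = type;
        params.apiVersion = NVENCAPI_VERSION;
        return NvEncOpenEncodeSessionEx(&params, encoder);
    }

    /* Illustrative: register a CUDA device pointer so it can later be mapped
     * with NvEncMapInputResource(). */
    static NVENCSTATUS register_cuda_frame(void *encoder, void *devptr,
                                           uint32_t width, uint32_t height,
                                           uint32_t pitch, NV_ENC_BUFFER_FORMAT fmt,
                                           NV_ENC_REGISTERED_PTR *reg)
    {
        NV_ENC_REGISTER_RESOURCE res;
        NVENCSTATUS st;

        memset(&res, 0, sizeof(res));
        res.version            = NV_ENC_REGISTER_RESOURCE_VER;
        res.resourceType       = NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR;
        res.resourceToRegister = devptr;
        res.width              = width;
        res.height             = height;
        res.pitch              = pitch;
        res.bufferFormat       = fmt;
        st = NvEncRegisterResource(encoder, &res);
        if (st == NV_ENC_SUCCESS)
            *reg = res.registeredResource;
        return st;
    }
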
+ * Currently Reconfiguration of following are not supported. + * Change in GOP structure. + * Change in sync-Async mode. + * Change in MaxWidth & MaxHeight. + * Change in PTDmode. + * + * Resolution change is possible only if maxEncodeWidth & maxEncodeHeight of NV_ENC_INITIALIZE_PARAMS + * is set while creating encoder session. + * + * \param [in] encoder + * Pointer to the NVEncodeAPI interface. + * + * \param [in] reInitEncodeParams + * Pointer to a ::NV_ENC_RECONFIGURE_PARAMS structure. + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_NO_ENCODE_DEVICE \n + * ::NV_ENC_ERR_UNSUPPORTED_DEVICE \n + * ::NV_ENC_ERR_INVALID_DEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncReconfigureEncoder (void *encoder, NV_ENC_RECONFIGURE_PARAMS* reInitEncodeParams); + + + +// NvEncCreateMVBuffer +/** + * \brief Allocates output MV buffer for ME only mode. + * + * This function is used to allocate an output MV buffer. The size of the mvBuffer is + * dependent on the frame height and width of the last ::NvEncCreateInputBuffer() call. + * The NV_ENC_OUTPUT_PTR returned by the NvEncodeAPI interface in the + * ::NV_ENC_CREATE_MV_BUFFER::mvBuffer field should be used in + * ::NvEncRunMotionEstimationOnly() API. + * Client must lock ::NV_ENC_CREATE_MV_BUFFER::mvBuffer using ::NvEncLockBitstream() API to get the motion vector data. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] createMVBufferParams + * Pointer to the ::NV_ENC_CREATE_MV_BUFFER structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_GENERIC \n + */ +NVENCSTATUS NVENCAPI NvEncCreateMVBuffer (void* encoder, NV_ENC_CREATE_MV_BUFFER* createMVBufferParams); + + +// NvEncDestroyMVBuffer +/** + * \brief Release an output MV buffer for ME only mode. + * + * This function is used to release the output MV buffer allocated using + * the ::NvEncCreateMVBuffer() function. The client must release the output + * mvBuffer using this function before destroying the encoder session. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] mvBuffer + * Pointer to the mvBuffer being released. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + */ +NVENCSTATUS NVENCAPI NvEncDestroyMVBuffer (void* encoder, NV_ENC_OUTPUT_PTR mvBuffer); + + +// NvEncRunMotionEstimationOnly +/** + * \brief Submit an input picture and reference frame for motion estimation in ME only mode. + * + * This function is used to submit the input frame and reference frame for motion + * estimation. The ME parameters are passed using *meOnlyParams which is a pointer + * to ::_NV_ENC_MEONLY_PARAMS structure. + * Client must lock ::NV_ENC_CREATE_MV_BUFFER::mvBuffer using ::NvEncLockBitstream() API to get the motion vector data. + * to get motion vector data. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. 
+ * \param [in] meOnlyParams + * Pointer to the ::_NV_ENC_MEONLY_PARAMS structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_NEED_MORE_INPUT \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + */ +NVENCSTATUS NVENCAPI NvEncRunMotionEstimationOnly (void* encoder, NV_ENC_MEONLY_PARAMS* meOnlyParams); + +// NvEncodeAPIGetMaxSupportedVersion +/** + * \brief Get the largest NvEncodeAPI version supported by the driver. + * + * This function can be used by clients to determine if the driver supports + * the NvEncodeAPI header the application was compiled with. + * + * \param [out] version + * Pointer to the requested value. The 4 least significant bits in the returned + * indicate the minor version and the rest of the bits indicate the major + * version of the largest supported version. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + */ +NVENCSTATUS NVENCAPI NvEncodeAPIGetMaxSupportedVersion (uint32_t* version); + + +/// \cond API PFN +/* + * Defines API function pointers + */ +typedef NVENCSTATUS (NVENCAPI* PNVENCOPENENCODESESSION) (void* device, uint32_t deviceType, void** encoder); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEGUIDCOUNT) (void* encoder, uint32_t* encodeGUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEGUIDS) (void* encoder, GUID* GUIDs, uint32_t guidArraySize, uint32_t* GUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPROFILEGUIDCOUNT) (void* encoder, GUID encodeGUID, uint32_t* encodeProfileGUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPROFILEGUIDS) (void* encoder, GUID encodeGUID, GUID* profileGUIDs, uint32_t guidArraySize, uint32_t* GUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETINPUTFORMATCOUNT) (void* encoder, GUID encodeGUID, uint32_t* inputFmtCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETINPUTFORMATS) (void* encoder, GUID encodeGUID, NV_ENC_BUFFER_FORMAT* inputFmts, uint32_t inputFmtArraySize, uint32_t* inputFmtCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODECAPS) (void* encoder, GUID encodeGUID, NV_ENC_CAPS_PARAM* capsParam, int* capsVal); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETCOUNT) (void* encoder, GUID encodeGUID, uint32_t* encodePresetGUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETGUIDS) (void* encoder, GUID encodeGUID, GUID* presetGUIDs, uint32_t guidArraySize, uint32_t* encodePresetGUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETCONFIG) (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_PRESET_CONFIG* presetConfig); +typedef NVENCSTATUS (NVENCAPI* PNVENCINITIALIZEENCODER) (void* encoder, NV_ENC_INITIALIZE_PARAMS* createEncodeParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEINPUTBUFFER) (void* encoder, NV_ENC_CREATE_INPUT_BUFFER* createInputBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYINPUTBUFFER) (void* encoder, NV_ENC_INPUT_PTR inputBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEBITSTREAMBUFFER) (void* encoder, NV_ENC_CREATE_BITSTREAM_BUFFER* createBitstreamBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYBITSTREAMBUFFER) (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCENCODEPICTURE) (void* encoder, NV_ENC_PIC_PARAMS* encodePicParams); +typedef NVENCSTATUS (NVENCAPI* 
PNVENCLOCKBITSTREAM) (void* encoder, NV_ENC_LOCK_BITSTREAM* lockBitstreamBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNLOCKBITSTREAM) (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCLOCKINPUTBUFFER) (void* encoder, NV_ENC_LOCK_INPUT_BUFFER* lockInputBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNLOCKINPUTBUFFER) (void* encoder, NV_ENC_INPUT_PTR inputBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODESTATS) (void* encoder, NV_ENC_STAT* encodeStats); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETSEQUENCEPARAMS) (void* encoder, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload); +typedef NVENCSTATUS (NVENCAPI* PNVENCREGISTERASYNCEVENT) (void* encoder, NV_ENC_EVENT_PARAMS* eventParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNREGISTERASYNCEVENT) (void* encoder, NV_ENC_EVENT_PARAMS* eventParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCMAPINPUTRESOURCE) (void* encoder, NV_ENC_MAP_INPUT_RESOURCE* mapInputResParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNMAPINPUTRESOURCE) (void* encoder, NV_ENC_INPUT_PTR mappedInputBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYENCODER) (void* encoder); +typedef NVENCSTATUS (NVENCAPI* PNVENCINVALIDATEREFFRAMES) (void* encoder, uint64_t invalidRefFrameTimeStamp); +typedef NVENCSTATUS (NVENCAPI* PNVENCOPENENCODESESSIONEX) (NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS *openSessionExParams, void** encoder); +typedef NVENCSTATUS (NVENCAPI* PNVENCREGISTERRESOURCE) (void* encoder, NV_ENC_REGISTER_RESOURCE* registerResParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNREGISTERRESOURCE) (void* encoder, NV_ENC_REGISTERED_PTR registeredRes); +typedef NVENCSTATUS (NVENCAPI* PNVENCRECONFIGUREENCODER) (void* encoder, NV_ENC_RECONFIGURE_PARAMS* reInitEncodeParams); + +typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEMVBUFFER) (void* encoder, NV_ENC_CREATE_MV_BUFFER* createMVBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYMVBUFFER) (void* encoder, NV_ENC_OUTPUT_PTR mvBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCRUNMOTIONESTIMATIONONLY) (void* encoder, NV_ENC_MEONLY_PARAMS* meOnlyParams); + + +/// \endcond + + +/** @} */ /* END ENCODE_FUNC */ + +/** + * \ingroup ENCODER_STRUCTURE + * NV_ENCODE_API_FUNCTION_LIST + */ +typedef struct _NV_ENCODE_API_FUNCTION_LIST +{ + uint32_t version; /**< [in]: Client should pass NV_ENCODE_API_FUNCTION_LIST_VER. */ + uint32_t reserved; /**< [in]: Reserved and should be set to 0. */ + PNVENCOPENENCODESESSION nvEncOpenEncodeSession; /**< [out]: Client should access ::NvEncOpenEncodeSession() API through this pointer. */ + PNVENCGETENCODEGUIDCOUNT nvEncGetEncodeGUIDCount; /**< [out]: Client should access ::NvEncGetEncodeGUIDCount() API through this pointer. */ + PNVENCGETENCODEPRESETCOUNT nvEncGetEncodeProfileGUIDCount; /**< [out]: Client should access ::NvEncGetEncodeProfileGUIDCount() API through this pointer.*/ + PNVENCGETENCODEPRESETGUIDS nvEncGetEncodeProfileGUIDs; /**< [out]: Client should access ::NvEncGetEncodeProfileGUIDs() API through this pointer. */ + PNVENCGETENCODEGUIDS nvEncGetEncodeGUIDs; /**< [out]: Client should access ::NvEncGetEncodeGUIDs() API through this pointer. */ + PNVENCGETINPUTFORMATCOUNT nvEncGetInputFormatCount; /**< [out]: Client should access ::NvEncGetInputFormatCount() API through this pointer. */ + PNVENCGETINPUTFORMATS nvEncGetInputFormats; /**< [out]: Client should access ::NvEncGetInputFormats() API through this pointer. */ + PNVENCGETENCODECAPS nvEncGetEncodeCaps; /**< [out]: Client should access ::NvEncGetEncodeCaps() API through this pointer. 
*/ + PNVENCGETENCODEPRESETCOUNT nvEncGetEncodePresetCount; /**< [out]: Client should access ::NvEncGetEncodePresetCount() API through this pointer. */ + PNVENCGETENCODEPRESETGUIDS nvEncGetEncodePresetGUIDs; /**< [out]: Client should access ::NvEncGetEncodePresetGUIDs() API through this pointer. */ + PNVENCGETENCODEPRESETCONFIG nvEncGetEncodePresetConfig; /**< [out]: Client should access ::NvEncGetEncodePresetConfig() API through this pointer. */ + PNVENCINITIALIZEENCODER nvEncInitializeEncoder; /**< [out]: Client should access ::NvEncInitializeEncoder() API through this pointer. */ + PNVENCCREATEINPUTBUFFER nvEncCreateInputBuffer; /**< [out]: Client should access ::NvEncCreateInputBuffer() API through this pointer. */ + PNVENCDESTROYINPUTBUFFER nvEncDestroyInputBuffer; /**< [out]: Client should access ::NvEncDestroyInputBuffer() API through this pointer. */ + PNVENCCREATEBITSTREAMBUFFER nvEncCreateBitstreamBuffer; /**< [out]: Client should access ::NvEncCreateBitstreamBuffer() API through this pointer. */ + PNVENCDESTROYBITSTREAMBUFFER nvEncDestroyBitstreamBuffer; /**< [out]: Client should access ::NvEncDestroyBitstreamBuffer() API through this pointer. */ + PNVENCENCODEPICTURE nvEncEncodePicture; /**< [out]: Client should access ::NvEncEncodePicture() API through this pointer. */ + PNVENCLOCKBITSTREAM nvEncLockBitstream; /**< [out]: Client should access ::NvEncLockBitstream() API through this pointer. */ + PNVENCUNLOCKBITSTREAM nvEncUnlockBitstream; /**< [out]: Client should access ::NvEncUnlockBitstream() API through this pointer. */ + PNVENCLOCKINPUTBUFFER nvEncLockInputBuffer; /**< [out]: Client should access ::NvEncLockInputBuffer() API through this pointer. */ + PNVENCUNLOCKINPUTBUFFER nvEncUnlockInputBuffer; /**< [out]: Client should access ::NvEncUnlockInputBuffer() API through this pointer. */ + PNVENCGETENCODESTATS nvEncGetEncodeStats; /**< [out]: Client should access ::NvEncGetEncodeStats() API through this pointer. */ + PNVENCGETSEQUENCEPARAMS nvEncGetSequenceParams; /**< [out]: Client should access ::NvEncGetSequenceParams() API through this pointer. */ + PNVENCREGISTERASYNCEVENT nvEncRegisterAsyncEvent; /**< [out]: Client should access ::NvEncRegisterAsyncEvent() API through this pointer. */ + PNVENCUNREGISTERASYNCEVENT nvEncUnregisterAsyncEvent; /**< [out]: Client should access ::NvEncUnregisterAsyncEvent() API through this pointer. */ + PNVENCMAPINPUTRESOURCE nvEncMapInputResource; /**< [out]: Client should access ::NvEncMapInputResource() API through this pointer. */ + PNVENCUNMAPINPUTRESOURCE nvEncUnmapInputResource; /**< [out]: Client should access ::NvEncUnmapInputResource() API through this pointer. */ + PNVENCDESTROYENCODER nvEncDestroyEncoder; /**< [out]: Client should access ::NvEncDestroyEncoder() API through this pointer. */ + PNVENCINVALIDATEREFFRAMES nvEncInvalidateRefFrames; /**< [out]: Client should access ::NvEncInvalidateRefFrames() API through this pointer. */ + PNVENCOPENENCODESESSIONEX nvEncOpenEncodeSessionEx; /**< [out]: Client should access ::NvEncOpenEncodeSession() API through this pointer. */ + PNVENCREGISTERRESOURCE nvEncRegisterResource; /**< [out]: Client should access ::NvEncRegisterResource() API through this pointer. */ + PNVENCUNREGISTERRESOURCE nvEncUnregisterResource; /**< [out]: Client should access ::NvEncUnregisterResource() API through this pointer. */ + PNVENCRECONFIGUREENCODER nvEncReconfigureEncoder; /**< [out]: Client should access ::NvEncReconfigureEncoder() API through this pointer. 
*/ + void* reserved1; + PNVENCCREATEMVBUFFER nvEncCreateMVBuffer; /**< [out]: Client should access ::NvEncCreateMVBuffer API through this pointer. */ + PNVENCDESTROYMVBUFFER nvEncDestroyMVBuffer; /**< [out]: Client should access ::NvEncDestroyMVBuffer API through this pointer. */ + PNVENCRUNMOTIONESTIMATIONONLY nvEncRunMotionEstimationOnly; /**< [out]: Client should access ::NvEncRunMotionEstimationOnly API through this pointer. */ + void* reserved2[281]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENCODE_API_FUNCTION_LIST; + +/** Macro for constructing the version field of ::_NV_ENCODEAPI_FUNCTION_LIST. */ +#define NV_ENCODE_API_FUNCTION_LIST_VER NVENCAPI_STRUCT_VERSION(2) + +// NvEncodeAPICreateInstance +/** + * \ingroup ENCODE_FUNC + * Entry Point to the NvEncodeAPI interface. + * + * Creates an instance of the NvEncodeAPI interface, and populates the + * pFunctionList with function pointers to the API routines implemented by the + * NvEncodeAPI interface. + * + * \param [out] functionList + * + * \return + * ::NV_ENC_SUCCESS + * ::NV_ENC_ERR_INVALID_PTR + */ +NVENCSTATUS NVENCAPI NvEncodeAPICreateInstance(NV_ENCODE_API_FUNCTION_LIST *functionList); + +#ifdef __cplusplus +} +#endif + + +#endif + diff --git a/compat/os2threads.h b/compat/os2threads.h new file mode 100644 index 0000000..40a119f --- /dev/null +++ b/compat/os2threads.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2011 KO Myung-Hun <komh@chollian.net> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * os2threads to pthreads wrapper + */ + +#ifndef COMPAT_OS2THREADS_H +#define COMPAT_OS2THREADS_H + +#define INCL_DOS +#include <os2.h> + +#undef __STRICT_ANSI__ /* for _beginthread() */ +#include <stdlib.h> + +#include <sys/builtin.h> +#include <sys/fmutex.h> + +#include "libavutil/attributes.h" + +typedef struct { + TID tid; + void *(*start_routine)(void *); + void *arg; + void *result; +} pthread_t; + +typedef void pthread_attr_t; + +typedef HMTX pthread_mutex_t; +typedef void pthread_mutexattr_t; + +typedef struct { + HEV event_sem; + HEV ack_sem; + volatile unsigned wait_count; +} pthread_cond_t; + +typedef void pthread_condattr_t; + +typedef struct { + volatile int done; + _fmutex mtx; +} pthread_once_t; + +#define PTHREAD_ONCE_INIT {0, _FMUTEX_INITIALIZER} + +static void thread_entry(void *arg) +{ + pthread_t *thread = arg; + + thread->result = thread->start_routine(thread->arg); +} + +static av_always_inline int pthread_create(pthread_t *thread, + const pthread_attr_t *attr, + void *(*start_routine)(void*), + void *arg) +{ + thread->start_routine = start_routine; + thread->arg = arg; + thread->result = NULL; + + thread->tid = _beginthread(thread_entry, NULL, 1024 * 1024, thread); + + return 0; +} + +static av_always_inline int pthread_join(pthread_t thread, void **value_ptr) +{ + DosWaitThread(&thread.tid, DCWW_WAIT); + + if (value_ptr) + *value_ptr = thread.result; + + return 0; +} + +static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, + const pthread_mutexattr_t *attr) +{ + DosCreateMutexSem(NULL, (PHMTX)mutex, 0, FALSE); + + return 0; +} + +static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex) +{ + DosCloseMutexSem(*(PHMTX)mutex); + + return 0; +} + +static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex) +{ + DosRequestMutexSem(*(PHMTX)mutex, SEM_INDEFINITE_WAIT); + + return 0; +} + +static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex) +{ + DosReleaseMutexSem(*(PHMTX)mutex); + + return 0; +} + +static av_always_inline int pthread_cond_init(pthread_cond_t *cond, + const pthread_condattr_t *attr) +{ + DosCreateEventSem(NULL, &cond->event_sem, DCE_POSTONE, FALSE); + DosCreateEventSem(NULL, &cond->ack_sem, DCE_POSTONE, FALSE); + + cond->wait_count = 0; + + return 0; +} + +static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond) +{ + DosCloseEventSem(cond->event_sem); + DosCloseEventSem(cond->ack_sem); + + return 0; +} + +static av_always_inline int pthread_cond_signal(pthread_cond_t *cond) +{ + if (!__atomic_cmpxchg32(&cond->wait_count, 0, 0)) { + DosPostEventSem(cond->event_sem); + DosWaitEventSem(cond->ack_sem, SEM_INDEFINITE_WAIT); + } + + return 0; +} + +static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond) +{ + while (!__atomic_cmpxchg32(&cond->wait_count, 0, 0)) + pthread_cond_signal(cond); + + return 0; +} + +static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, + pthread_mutex_t *mutex) +{ + __atomic_increment(&cond->wait_count); + + pthread_mutex_unlock(mutex); + + DosWaitEventSem(cond->event_sem, SEM_INDEFINITE_WAIT); + + __atomic_decrement(&cond->wait_count); + + DosPostEventSem(cond->ack_sem); + + pthread_mutex_lock(mutex); + + return 0; +} + +static av_always_inline int pthread_once(pthread_once_t 
*once_control, + void (*init_routine)(void)) +{ + if (!once_control->done) + { + _fmutex_request(&once_control->mtx, 0); + + if (!once_control->done) + { + init_routine(); + + once_control->done = 1; + } + + _fmutex_release(&once_control->mtx); + } + + return 0; +} +#endif /* COMPAT_OS2THREADS_H */ diff --git a/compat/plan9/main.c b/compat/plan9/main.c index 97d7067..d46f96d 100644 --- a/compat/plan9/main.c +++ b/compat/plan9/main.c @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/compat/solaris/make_sunver.pl b/compat/solaris/make_sunver.pl index 929bdda..0e9ed1d 100755 --- a/compat/solaris/make_sunver.pl +++ b/compat/solaris/make_sunver.pl @@ -1,4 +1,4 @@ -#!/usr/bin/perl -w +#!/usr/bin/env perl # make_sunver.pl # diff --git a/compat/strtod.c b/compat/strtod.c index 258909f..3a9452e 100644 --- a/compat/strtod.c +++ b/compat/strtod.c @@ -2,20 +2,20 @@ * C99-compatible strtod() implementation * Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/compat/tms470/math.h b/compat/tms470/math.h index b686d4d..0a42743 100644 --- a/compat/tms470/math.h +++ b/compat/tms470/math.h @@ -1,23 +1,23 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#ifndef LIBAV_COMPAT_TMS470_MATH_H -#define LIBAV_COMPAT_TMS470_MATH_H +#ifndef COMPAT_TMS470_MATH_H +#define COMPAT_TMS470_MATH_H #include_next <math.h> @@ -27,4 +27,4 @@ #define INFINITY (*(const float*)((const unsigned []){ 0x7f800000 })) #define NAN (*(const float*)((const unsigned []){ 0x7fc00000 })) -#endif /* LIBAV_COMPAT_TMS470_MATH_H */ +#endif /* COMPAT_TMS470_MATH_H */ diff --git a/compat/va_copy.h b/compat/va_copy.h new file mode 100644 index 0000000..a40bbe6 --- /dev/null +++ b/compat/va_copy.h @@ -0,0 +1,34 @@ +/* + * MSVC Compatible va_copy macro + * Copyright (c) 2012 Derek Buitenhuis + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef COMPAT_VA_COPY_H +#define COMPAT_VA_COPY_H + +#include <stdarg.h> + +#if !defined(va_copy) && defined(_MSC_VER) +#define va_copy(dst, src) ((dst) = (src)) +#endif +#if !defined(va_copy) && defined(__GNUC__) && __GNUC__ < 3 +#define va_copy(dst, src) __va_copy(dst, src) +#endif + +#endif /* COMPAT_VA_COPY_H */ diff --git a/compat/w32dlfcn.h b/compat/w32dlfcn.h new file mode 100644 index 0000000..bc9bb8c --- /dev/null +++ b/compat/w32dlfcn.h @@ -0,0 +1,83 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef COMPAT_W32DLFCN_H +#define COMPAT_W32DLFCN_H + +#ifdef _WIN32 +#include <windows.h> +#if _WIN32_WINNT < 0x0602 +#include "libavutil/wchar_filename.h" +#endif +/** + * Safe function used to open dynamic libs. This attempts to improve program security + * by removing the current directory from the dll search path. 
Only dll's found in the + * executable or system directory are allowed to be loaded. + * @param name The dynamic lib name. + * @return A handle to the opened lib. + */ +static inline HMODULE win32_dlopen(const char *name) +{ +#if _WIN32_WINNT < 0x0602 + // Need to check if KB2533623 is available + if (!GetProcAddress(GetModuleHandleW(L"kernel32.dll"), "SetDefaultDllDirectories")) { + HMODULE module = NULL; + wchar_t *path = NULL, *name_w = NULL; + DWORD pathlen; + if (utf8towchar(name, &name_w)) + goto exit; + path = (wchar_t *)av_mallocz_array(MAX_PATH, sizeof(wchar_t)); + // Try local directory first + pathlen = GetModuleFileNameW(NULL, path, MAX_PATH); + pathlen = wcsrchr(path, '\\') - path; + if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH) + goto exit; + path[pathlen] = '\\'; + wcscpy(path + pathlen + 1, name_w); + module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + if (module == NULL) { + // Next try System32 directory + pathlen = GetSystemDirectoryW(path, MAX_PATH); + if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH) + goto exit; + path[pathlen] = '\\'; + wcscpy(path + pathlen + 1, name_w); + module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + } +exit: + av_free(path); + av_free(name_w); + return module; + } +#endif +#ifndef LOAD_LIBRARY_SEARCH_APPLICATION_DIR +# define LOAD_LIBRARY_SEARCH_APPLICATION_DIR 0x00000200 +#endif +#ifndef LOAD_LIBRARY_SEARCH_SYSTEM32 +# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800 +#endif + return LoadLibraryExA(name, NULL, LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32); +} +#define dlopen(name, flags) win32_dlopen(name) +#define dlclose FreeLibrary +#define dlsym GetProcAddress +#else +#include <dlfcn.h> +#endif + +#endif /* COMPAT_W32DLFCN_H */ diff --git a/compat/w32pthreads.h b/compat/w32pthreads.h index f38f767..0c9a7fa 100644 --- a/compat/w32pthreads.h +++ b/compat/w32pthreads.h @@ -4,20 +4,20 @@ * Authors: Steven Walters <kemuri9@gmail.com> * Pegasys Inc. <http://www.pegasys-inc.com> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -26,8 +26,8 @@ * w32threads to pthreads wrapper */ -#ifndef LIBAV_COMPAT_W32PTHREADS_H -#define LIBAV_COMPAT_W32PTHREADS_H +#ifndef COMPAT_W32PTHREADS_H +#define COMPAT_W32PTHREADS_H /* Build up a pthread-like API using underlying Windows API. 
Have only static * methods so as to not conflict with a potentially linked in pthread-win32 @@ -45,6 +45,7 @@ #endif #include "libavutil/attributes.h" +#include "libavutil/common.h" #include "libavutil/internal.h" #include "libavutil/mem.h" @@ -86,19 +87,29 @@ static av_unused int pthread_create(pthread_t *thread, const void *unused_attr, { thread->func = start_routine; thread->arg = arg; +#if HAVE_WINRT + thread->handle = (void*)CreateThread(NULL, 0, win32thread_worker, thread, + 0, NULL); +#else thread->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, thread, 0, NULL); +#endif return !thread->handle; } -static av_unused void pthread_join(pthread_t thread, void **value_ptr) +static av_unused int pthread_join(pthread_t thread, void **value_ptr) { DWORD ret = WaitForSingleObject(thread.handle, INFINITE); - if (ret != WAIT_OBJECT_0) - return; + if (ret != WAIT_OBJECT_0) { + if (ret == WAIT_ABANDONED) + return EINVAL; + else + return EDEADLK; + } if (value_ptr) *value_ptr = thread.ret; CloseHandle(thread.handle); + return 0; } static inline int pthread_mutex_init(pthread_mutex_t *m, void* attr) @@ -123,7 +134,6 @@ static inline int pthread_mutex_unlock(pthread_mutex_t *m) } #if _WIN32_WINNT >= 0x0600 - typedef INIT_ONCE pthread_once_t; #define PTHREAD_ONCE_INIT INIT_ONCE_STATIC_INIT @@ -137,20 +147,22 @@ static av_unused int pthread_once(pthread_once_t *once_control, void (*init_rout return 0; } -static inline void pthread_cond_init(pthread_cond_t *cond, const void *unused_attr) +static inline int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr) { InitializeConditionVariable(cond); + return 0; } /* native condition variables do not destroy */ -static inline void pthread_cond_destroy(pthread_cond_t *cond) +static inline int pthread_cond_destroy(pthread_cond_t *cond) { - return; + return 0; } -static inline void pthread_cond_broadcast(pthread_cond_t *cond) +static inline int pthread_cond_broadcast(pthread_cond_t *cond) { WakeAllConditionVariable(cond); + return 0; } static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) @@ -159,9 +171,10 @@ static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex return 0; } -static inline void pthread_cond_signal(pthread_cond_t *cond) +static inline int pthread_cond_signal(pthread_cond_t *cond) { WakeConditionVariable(cond); + return 0; } #else // _WIN32_WINNT < 0x0600 @@ -245,7 +258,7 @@ static void (WINAPI *cond_signal)(pthread_cond_t *cond); static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex, DWORD milliseconds); -static av_unused void pthread_cond_init(pthread_cond_t *cond, const void *unused_attr) +static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr) { win32_cond_t *win32_cond = NULL; @@ -253,31 +266,32 @@ static av_unused void pthread_cond_init(pthread_cond_t *cond, const void *unused if (cond_init) { cond_init(cond); - return; + return 0; } /* non native condition variables */ win32_cond = av_mallocz(sizeof(win32_cond_t)); if (!win32_cond) - return; + return ENOMEM; cond->Ptr = win32_cond; win32_cond->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL); if (!win32_cond->semaphore) - return; + return ENOMEM; win32_cond->waiters_done = CreateEvent(NULL, TRUE, FALSE, NULL); if (!win32_cond->waiters_done) - return; + return ENOMEM; pthread_mutex_init(&win32_cond->mtx_waiter_count, NULL); pthread_mutex_init(&win32_cond->mtx_broadcast, NULL); + return 0; } -static av_unused void pthread_cond_destroy(pthread_cond_t *cond) 
+static av_unused int pthread_cond_destroy(pthread_cond_t *cond) { win32_cond_t *win32_cond = cond->Ptr; /* native condition variables do not destroy */ if (cond_init) - return; + return 0; /* non native condition variables */ CloseHandle(win32_cond->semaphore); @@ -286,16 +300,17 @@ static av_unused void pthread_cond_destroy(pthread_cond_t *cond) pthread_mutex_destroy(&win32_cond->mtx_broadcast); av_freep(&win32_cond); cond->Ptr = NULL; + return 0; } -static av_unused void pthread_cond_broadcast(pthread_cond_t *cond) +static av_unused int pthread_cond_broadcast(pthread_cond_t *cond) { win32_cond_t *win32_cond = cond->Ptr; int have_waiter; if (cond_broadcast) { cond_broadcast(cond); - return; + return 0; } /* non native condition variables */ @@ -317,6 +332,7 @@ static av_unused void pthread_cond_broadcast(pthread_cond_t *cond) } else pthread_mutex_unlock(&win32_cond->mtx_waiter_count); pthread_mutex_unlock(&win32_cond->mtx_broadcast); + return 0; } static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) @@ -351,13 +367,13 @@ static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mu return pthread_mutex_lock(mutex); } -static av_unused void pthread_cond_signal(pthread_cond_t *cond) +static av_unused int pthread_cond_signal(pthread_cond_t *cond) { win32_cond_t *win32_cond = cond->Ptr; int have_waiter; if (cond_signal) { cond_signal(cond); - return; + return 0; } pthread_mutex_lock(&win32_cond->mtx_broadcast); @@ -374,6 +390,7 @@ static av_unused void pthread_cond_signal(pthread_cond_t *cond) } pthread_mutex_unlock(&win32_cond->mtx_broadcast); + return 0; } #endif @@ -398,4 +415,4 @@ static av_unused void w32thread_init(void) } -#endif /* LIBAV_COMPAT_W32PTHREADS_H */ +#endif /* COMPAT_W32PTHREADS_H */ |
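The nvEncodeAPI.h hunks above add NvEncodeAPIGetMaxSupportedVersion(), NvEncodeAPICreateInstance() and the NV_ENCODE_API_FUNCTION_LIST dispatch table. A minimal sketch of the bootstrap sequence they describe follows, assuming the _VER macros and struct members declared earlier in the same header and a caller-provided CUDA context; in practice the two exported entry points are resolved from the driver library at run time (see the dlopen() sketch further down).

#include <stdio.h>
#include <string.h>
#include "nvEncodeAPI.h"   /* the header added by this diff */

/* Sketch: check the driver against the header version, fetch the function
 * list and open a CUDA encode session.  "cuda_ctx" is a CUcontext created
 * by the caller; error handling is reduced to NULL returns. */
static void *open_nvenc_session(NV_ENCODE_API_FUNCTION_LIST *fl, void *cuda_ctx)
{
    uint32_t drv_ver = 0;
    /* 4 least significant bits: minor version; the rest: major version */
    uint32_t hdr_ver = (NVENCAPI_MAJOR_VERSION << 4) | NVENCAPI_MINOR_VERSION;
    NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params = { 0 };
    void *encoder = NULL;

    if (NvEncodeAPIGetMaxSupportedVersion(&drv_ver) != NV_ENC_SUCCESS ||
        drv_ver < hdr_ver) {
        fprintf(stderr, "installed driver is older than this nvEncodeAPI.h\n");
        return NULL;
    }

    memset(fl, 0, sizeof(*fl));
    fl->version = NV_ENCODE_API_FUNCTION_LIST_VER;
    if (NvEncodeAPICreateInstance(fl) != NV_ENC_SUCCESS)
        return NULL;

    params.version    = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
    params.apiVersion = NVENCAPI_VERSION;
    params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;   /* or NV_ENC_DEVICE_TYPE_DIRECTX */
    params.device     = cuda_ctx;

    if (fl->nvEncOpenEncodeSessionEx(&params, &encoder) != NV_ENC_SUCCESS) {
        /* per the documentation above, call nvEncDestroyEncoder before exiting */
        if (encoder)
            fl->nvEncDestroyEncoder(encoder);
        return NULL;
    }
    return encoder;
}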
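The NvEncRegisterResource()/NvEncUnregisterResource() documentation above describes a register, map, encode, unmap, unregister cycle for externally allocated surfaces. Below is a sketch of that cycle for a CUDA device pointer; the NV_ENC_REGISTER_RESOURCE / NV_ENC_MAP_INPUT_RESOURCE member names, the NV12 buffer format and the omission of error checks are assumptions made for illustration.

/* Register a caller-allocated CUDA surface and map it so it can be passed
 * as NV_ENC_PIC_PARAMS.inputBuffer.  Unmapping and unregistering remain the
 * caller's responsibility once encoding of the frame has completed. */
static NV_ENC_INPUT_PTR map_cuda_frame(NV_ENCODE_API_FUNCTION_LIST *fl, void *enc,
                                       void *cuda_devptr, uint32_t width,
                                       uint32_t height, uint32_t pitch,
                                       NV_ENC_REGISTERED_PTR *registered)
{
    NV_ENC_REGISTER_RESOURCE  reg = { 0 };
    NV_ENC_MAP_INPUT_RESOURCE map = { 0 };

    reg.version            = NV_ENC_REGISTER_RESOURCE_VER;
    reg.resourceType       = NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR;
    reg.resourceToRegister = cuda_devptr;
    reg.width              = width;
    reg.height             = height;
    reg.pitch              = pitch;
    reg.bufferFormat       = NV_ENC_BUFFER_FORMAT_NV12;    /* assumed NV12 input */
    fl->nvEncRegisterResource(enc, &reg);

    map.version            = NV_ENC_MAP_INPUT_RESOURCE_VER;
    map.registeredResource = reg.registeredResource;
    fl->nvEncMapInputResource(enc, &map);

    *registered = reg.registeredResource;   /* later: nvEncUnregisterResource() */
    return map.mappedResource;              /* later: nvEncUnmapInputResource() */
}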
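NvEncReconfigureEncoder() above reuses NV_ENC_INITIALIZE_PARAMS wrapped in NV_ENC_RECONFIGURE_PARAMS. A sketch of a bitrate-only change follows, assuming init_params (with its attached NV_ENC_CONFIG) is the structure previously passed to NvEncInitializeEncoder(); per the notes above, the GOP structure, sync/async mode, PTD mode and MaxWidth/MaxHeight must stay as they were.

/* Change the average bitrate of a running session.  Note that encodeConfig is
 * a pointer, so the caller's NV_ENC_CONFIG is modified in place. */
static NVENCSTATUS change_bitrate(NV_ENCODE_API_FUNCTION_LIST *fl, void *enc,
                                  NV_ENC_INITIALIZE_PARAMS *init_params,
                                  uint32_t new_avg_bitrate)
{
    NV_ENC_RECONFIGURE_PARAMS reconf = { 0 };

    reconf.version            = NV_ENC_RECONFIGURE_PARAMS_VER;
    reconf.reInitEncodeParams = *init_params;
    reconf.reInitEncodeParams.encodeConfig->rcParams.averageBitRate = new_avg_bitrate;
    reconf.resetEncoder       = 0;   /* keep rate-control history            */
    reconf.forceIDR           = 0;   /* no IDR required for a bitrate change */

    return fl->nvEncReconfigureEncoder(enc, &reconf);
}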
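For the ME-only mode functions above (NvEncCreateMVBuffer, NvEncRunMotionEstimationOnly, NvEncDestroyMVBuffer) the documented flow is: allocate an MV buffer, submit an input/reference pair, then lock the MV buffer with nvEncLockBitstream() to read the vectors. A sketch under those assumptions, with buffer format, completion events and error handling omitted:

/* Run motion estimation between two already-created (or mapped) input
 * surfaces and fetch the resulting motion vector data. */
static void run_me_only(NV_ENCODE_API_FUNCTION_LIST *fl, void *enc,
                        NV_ENC_INPUT_PTR input, NV_ENC_INPUT_PTR reference,
                        uint32_t width, uint32_t height)
{
    NV_ENC_CREATE_MV_BUFFER create_mv = { 0 };
    NV_ENC_MEONLY_PARAMS    me        = { 0 };
    NV_ENC_LOCK_BITSTREAM   lock      = { 0 };

    create_mv.version = NV_ENC_CREATE_MV_BUFFER_VER;
    fl->nvEncCreateMVBuffer(enc, &create_mv);

    me.version        = NV_ENC_MEONLY_PARAMS_VER;
    me.inputWidth     = width;
    me.inputHeight    = height;
    me.inputBuffer    = input;
    me.referenceFrame = reference;
    me.mvBuffer       = create_mv.mvBuffer;
    fl->nvEncRunMotionEstimationOnly(enc, &me);

    lock.version         = NV_ENC_LOCK_BITSTREAM_VER;
    lock.outputBitstream = create_mv.mvBuffer;
    fl->nvEncLockBitstream(enc, &lock);
    /* lock.bitstreamBufferPtr now points at the motion vector records */
    fl->nvEncUnlockBitstream(enc, create_mv.mvBuffer);

    fl->nvEncDestroyMVBuffer(enc, create_mv.mvBuffer);
}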
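compat/va_copy.h above only provides a va_copy() fallback for MSVC and pre-3.x GCC. The usual reason to need va_copy() is walking a va_list twice, for example sizing a buffer before formatting into it; a small sketch in plain C99, nothing here is specific to FFmpeg:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include "compat/va_copy.h"

/* Format into a freshly allocated buffer.  The copy is required because the
 * first vsnprintf() pass consumes the va_list. */
static char *format_alloc(const char *fmt, va_list args)
{
    va_list tmp;
    int len;
    char *buf;

    va_copy(tmp, args);
    len = vsnprintf(NULL, 0, fmt, tmp);
    va_end(tmp);
    if (len < 0)
        return NULL;

    buf = malloc(len + 1);
    if (buf)
        vsnprintf(buf, len + 1, fmt, args);
    return buf;
}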
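compat/w32dlfcn.h above maps dlopen()/dlsym()/dlclose() onto LoadLibraryExA()/GetProcAddress()/FreeLibrary with a restricted DLL search path. Below is a sketch of resolving the NvEncodeAPICreateInstance() entry point through it; the library names and the PNVENCODEAPICREATEINSTANCE typedef are assumptions for illustration, and the flags argument is simply discarded by the win32 dlopen macro.

#include "compat/w32dlfcn.h"
#include "nvEncodeAPI.h"

typedef NVENCSTATUS (NVENCAPI *PNVENCODEAPICREATEINSTANCE)(NV_ENCODE_API_FUNCTION_LIST *);

/* Load the NVENC runtime and resolve its exported entry point. */
static PNVENCODEAPICREATEINSTANCE load_nvenc_entry(void **handle)
{
#ifdef _WIN32
    *handle = dlopen("nvEncodeAPI64.dll", RTLD_NOW);       /* assumed DLL name */
#else
    *handle = dlopen("libnvidia-encode.so.1", RTLD_NOW);   /* assumed .so name */
#endif
    if (!*handle)
        return NULL;
    return (PNVENCODEAPICREATEINSTANCE)dlsym(*handle, "NvEncodeAPICreateInstance");
}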
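compat/os2threads.h (new) and the w32pthreads.h changes above both expose a small, source-compatible subset of the pthreads API, and the w32 wrappers now return int like POSIX. A sketch of the subset they cover follows; selecting the header via _WIN32/__OS2__ is an assumption for illustration (FFmpeg itself picks the wrapper through configure).

#include <stdio.h>
#if defined(_WIN32)
#   include "compat/w32pthreads.h"
#elif defined(__OS2__)
#   include "compat/os2threads.h"
#else
#   include <pthread.h>
#endif

static pthread_mutex_t mtx;
static pthread_cond_t  cond;
static pthread_once_t  once = PTHREAD_ONCE_INIT;
static int ready;

static void init_sync(void)
{
    pthread_mutex_init(&mtx, NULL);
    pthread_cond_init(&cond, NULL);
}

static void *worker(void *arg)
{
    pthread_mutex_lock(&mtx);
    ready = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&mtx);
    return arg;
}

static void wait_for_worker(void)
{
    pthread_t t;
    void *ret;

    pthread_once(&once, init_sync);       /* one-time init, provided by both wrappers */
    pthread_create(&t, NULL, worker, NULL);

    pthread_mutex_lock(&mtx);
    while (!ready)                        /* guard against spurious wakeups */
        pthread_cond_wait(&cond, &mtx);
    pthread_mutex_unlock(&mtx);

    pthread_join(t, &ret);                /* now returns an error code on Windows too */
}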